From 576c16dc4baa0676a402a117556af21ec4d7e291 Mon Sep 17 00:00:00 2001 From: sadiq1971 Date: Sat, 14 Mar 2026 02:15:32 +0600 Subject: [PATCH 1/5] feat: indexer fetcher --- pkg/cantonsdk/streaming/client.go | 262 +++++++++++++++++++++++++++++ pkg/cantonsdk/streaming/options.go | 25 +++ pkg/cantonsdk/streaming/types.go | 119 +++++++++++++ pkg/cantonsdk/values/decode.go | 67 ++++++++ pkg/indexer/fetcher.go | 85 ++++++++++ pkg/indexer/parser.go | 167 ++++++++++++++++++ pkg/indexer/types.go | 87 ++++++++++ 7 files changed, 812 insertions(+) create mode 100644 pkg/cantonsdk/streaming/client.go create mode 100644 pkg/cantonsdk/streaming/options.go create mode 100644 pkg/cantonsdk/streaming/types.go create mode 100644 pkg/indexer/fetcher.go create mode 100644 pkg/indexer/parser.go create mode 100644 pkg/indexer/types.go diff --git a/pkg/cantonsdk/streaming/client.go b/pkg/cantonsdk/streaming/client.go new file mode 100644 index 00000000..79154a37 --- /dev/null +++ b/pkg/cantonsdk/streaming/client.go @@ -0,0 +1,262 @@ +package streaming + +import ( + "context" + "errors" + "fmt" + "io" + "sync/atomic" + "time" + + lapiv2 "github.com/chainsafe/canton-middleware/pkg/cantonsdk/lapi/v2" + "github.com/chainsafe/canton-middleware/pkg/cantonsdk/ledger" + "github.com/chainsafe/canton-middleware/pkg/cantonsdk/values" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + reconnectBaseDelay = 5 * time.Second + reconnectMaxDelay = 60 * time.Second + txChannelCap = 100 +) + +// Streamer is the interface for opening a live Canton ledger stream. +// *Client satisfies this interface. +type Streamer interface { + Subscribe(ctx context.Context, req SubscribeRequest, lastOffset *int64) <-chan *LedgerTransaction +} + +// Client wraps UpdateService.GetUpdates with automatic reconnection and auth handling. +// It mirrors the streaming pattern established in pkg/cantonsdk/bridge/client.go. 
+type Client struct { + ledger ledger.Ledger + party string + logger *zap.Logger +} + +// New creates a new streaming Client for the given ledger and party. +func New(l ledger.Ledger, party string, opts ...Option) *Client { + s := applyOptions(opts) + return &Client{ + ledger: l, + party: party, + logger: s.logger, + } +} + +// Subscribe opens a live stream against the Canton ledger and returns a read-only channel +// of decoded transactions. It reconnects automatically with exponential backoff (5s → 60s) +// on transient errors, and invalidates the auth token on 401/403. +// +// lastOffset is updated atomically after each received transaction so that reconnects +// resume from the last safely received point. The caller is responsible for persisting +// lastOffset to the database (the processor does this atomically with event writes). +// +// The returned channel is closed when ctx is cancelled or a terminal error occurs +// (io.EOF, context cancellation). +func (c *Client) Subscribe( + ctx context.Context, + req SubscribeRequest, + lastOffset *int64, +) <-chan *LedgerTransaction { + out := make(chan *LedgerTransaction, txChannelCap) + + go func() { + defer close(out) + + reconnectDelay := reconnectBaseDelay + + for { + select { + case <-ctx.Done(): + return + default: + } + + err := c.runStream(ctx, req.FromOffset, req.TemplateIDs, lastOffset, out) + if err == nil || errors.Is(err, io.EOF) || ctx.Err() != nil { + return + } + + if isAuthError(err) { + c.ledger.InvalidateToken() + reconnectDelay = reconnectBaseDelay + } + + // Advance FromOffset to where the stream last successfully delivered a + // transaction so the next connection resumes from the correct position. 
+ req.FromOffset = atomic.LoadInt64(lastOffset) + + c.logger.Warn("canton stream disconnected, reconnecting", + zap.Error(err), + zap.Int64("resume_offset", req.FromOffset), + zap.Duration("backoff", reconnectDelay), + ) + + select { + case <-ctx.Done(): + return + case <-time.After(reconnectDelay): + } + + reconnectDelay = min(reconnectDelay*2, reconnectMaxDelay) + } + }() + + return out +} + +// runStream opens a single GetUpdates stream and forwards transactions to out until +// the stream ends or ctx is cancelled. It updates lastOffset atomically on each +// received transaction. +func (c *Client) runStream( + ctx context.Context, + fromOffset int64, + templateIDs []TemplateID, + lastOffset *int64, + out chan<- *LedgerTransaction, +) error { + authCtx := c.ledger.AuthContext(ctx) + + stream, err := c.ledger.Update().GetUpdates(authCtx, &lapiv2.GetUpdatesRequest{ + BeginExclusive: fromOffset, + UpdateFormat: &lapiv2.UpdateFormat{ + IncludeTransactions: &lapiv2.TransactionFormat{ + EventFormat: &lapiv2.EventFormat{ + FiltersByParty: map[string]*lapiv2.Filters{ + c.party: buildTemplateFilters(templateIDs), + }, + Verbose: true, + }, + TransactionShape: lapiv2.TransactionShape_TRANSACTION_SHAPE_ACS_DELTA, + }, + }, + }) + if err != nil { + if isAuthError(err) { + c.ledger.InvalidateToken() + } + return fmt.Errorf("open canton stream: %w", err) + } + + for { + resp, err := stream.Recv() + if err != nil { + if isAuthError(err) { + c.ledger.InvalidateToken() + } + return err + } + + tx := resp.GetTransaction() + if tx == nil { + // Checkpoint or topology update — nothing to index. + continue + } + + lt := decodeLedgerTransaction(tx) + atomic.StoreInt64(lastOffset, lt.Offset) + + select { + case out <- lt: + case <-ctx.Done(): + return ctx.Err() + } + } +} + +// buildTemplateFilters constructs the Filters value for a set of TemplateIDs. +// +// This is the gRPC-level (template-level) filter. 
It controls which contract types +// are delivered by the Canton Ledger API — reducing bandwidth to only the requested +// templates. It is NOT an instrument filter: it cannot filter by contract field values +// such as instrumentId. Instrument filtering (by InstrumentKey{Admin, ID}) happens +// downstream in the parser after events are received. +// +// When TemplateID.PackageID is empty the filter matches the template across all +// deployed package versions. Setting PackageID="" for CIP56.Events.TokenTransferEvent +// enables indexing of any third-party CIP56-compliant token regardless of which +// package version it was deployed with. +func buildTemplateFilters(templateIDs []TemplateID) *lapiv2.Filters { + cumulative := make([]*lapiv2.CumulativeFilter, 0, len(templateIDs)) + for _, tid := range templateIDs { + cumulative = append(cumulative, &lapiv2.CumulativeFilter{ + IdentifierFilter: &lapiv2.CumulativeFilter_TemplateFilter{ + TemplateFilter: &lapiv2.TemplateFilter{ + TemplateId: &lapiv2.Identifier{ + PackageId: tid.PackageID, // empty = match all package versions + ModuleName: tid.ModuleName, + EntityName: tid.EntityName, + }, + }, + }, + }) + } + return &lapiv2.Filters{Cumulative: cumulative} +} + +// decodeLedgerTransaction converts a proto Transaction into a LedgerTransaction. +func decodeLedgerTransaction(tx *lapiv2.Transaction) *LedgerTransaction { + lt := &LedgerTransaction{ + UpdateID: tx.GetUpdateId(), + Offset: tx.GetOffset(), + EffectiveTime: tx.GetEffectiveAt().AsTime(), + Events: make([]*LedgerEvent, 0, len(tx.Events)), + } + for _, ev := range tx.Events { + if le := decodeLedgerEvent(ev); le != nil { + lt.Events = append(lt.Events, le) + } + } + return lt +} + +// decodeLedgerEvent converts a proto Event to a LedgerEvent. +// For created events the DAML CreateArguments are pre-decoded into LedgerEvent.fields +// so that callers never need to import lapiv2 directly. +// Returns nil for event kinds the indexer does not process (e.g. 
exercised events). +func decodeLedgerEvent(ev *lapiv2.Event) *LedgerEvent { + if created := ev.GetCreated(); created != nil { + le := &LedgerEvent{ + ContractID: created.GetContractId(), + IsCreated: true, + fields: values.RecordToMap(created.GetCreateArguments()), + } + if tid := created.GetTemplateId(); tid != nil { + le.PackageID = tid.GetPackageId() + le.ModuleName = tid.GetModuleName() + le.TemplateName = tid.GetEntityName() + } + return le + } + + if archived := ev.GetArchived(); archived != nil { + le := &LedgerEvent{ + ContractID: archived.GetContractId(), + IsCreated: false, + } + if tid := archived.GetTemplateId(); tid != nil { + le.PackageID = tid.GetPackageId() + le.ModuleName = tid.GetModuleName() + le.TemplateName = tid.GetEntityName() + } + return le + } + + return nil +} + +// isAuthError returns true if err signals authentication or authorisation failure. +func isAuthError(err error) bool { + if err == nil { + return false + } + st, ok := status.FromError(err) + if !ok { + return false + } + return st.Code() == codes.Unauthenticated || st.Code() == codes.PermissionDenied +} diff --git a/pkg/cantonsdk/streaming/options.go b/pkg/cantonsdk/streaming/options.go new file mode 100644 index 00000000..56e6eb84 --- /dev/null +++ b/pkg/cantonsdk/streaming/options.go @@ -0,0 +1,25 @@ +package streaming + +import "go.uber.org/zap" + +// Option configures a streaming Client. +type Option func(*settings) + +type settings struct { + logger *zap.Logger +} + +// WithLogger sets a custom logger on the streaming Client. 
+func WithLogger(l *zap.Logger) Option { + return func(s *settings) { s.logger = l } +} + +func applyOptions(opts []Option) settings { + s := settings{logger: zap.NewNop()} + for _, opt := range opts { + if opt != nil { + opt(&s) + } + } + return s +} diff --git a/pkg/cantonsdk/streaming/types.go b/pkg/cantonsdk/streaming/types.go new file mode 100644 index 00000000..60ee7bb9 --- /dev/null +++ b/pkg/cantonsdk/streaming/types.go @@ -0,0 +1,119 @@ +// Package streaming provides a reusable, generic Canton ledger streaming client. +// +// It wraps UpdateService.GetUpdates with automatic reconnection, exponential backoff, +// and auth-token invalidation on 401. +package streaming + +import ( + "time" + + lapiv2 "github.com/chainsafe/canton-middleware/pkg/cantonsdk/lapi/v2" + "github.com/chainsafe/canton-middleware/pkg/cantonsdk/values" +) + +// TemplateID identifies a DAML template by its package, module, and entity name. +// It is the streaming package's own type — callers do not import lapiv2 directly. +type TemplateID struct { + PackageID string + ModuleName string + EntityName string +} + +// SubscribeRequest configures which templates to stream and from which ledger offset. +type SubscribeRequest struct { + // FromOffset is the exclusive start offset. Use 0 to start from the beginning. + FromOffset int64 + + // TemplateIDs lists the DAML templates to subscribe to. + TemplateIDs []TemplateID +} + +// LedgerTransaction is a decoded transaction received from the Canton GetUpdates stream. +type LedgerTransaction struct { + UpdateID string + Offset int64 + EffectiveTime time.Time + Events []*LedgerEvent +} + +// LedgerEvent is a single created or archived contract event within a transaction. +// All DAML field access goes through typed accessor methods — no lapiv2 types are exposed. +type LedgerEvent struct { + ContractID string + PackageID string + ModuleName string + TemplateName string + + // IsCreated is true for contract create events, false for archive events. 
+ IsCreated bool + + // fields holds the pre-decoded DAML record from CreateArguments, keyed by field label. + // Only populated for created events; nil for archived events. + fields map[string]*lapiv2.Value +} + +// TextField returns the named DAML Text field as a Go string. +// Returns "" when the field is absent or not of type Text. +func (e *LedgerEvent) TextField(name string) string { + return values.Text(e.fields[name]) +} + +// PartyField returns the named DAML Party field as a string. +// Returns "" when the field is absent or not of type Party. +func (e *LedgerEvent) PartyField(name string) string { + return values.Party(e.fields[name]) +} + +// NumericField returns the named DAML Numeric field as a decimal string (e.g. "1.5"). +// Returns "0" when the field is absent or not of type Numeric. +func (e *LedgerEvent) NumericField(name string) string { + return values.Numeric(e.fields[name]) +} + +// OptionalTextField returns the inner Text value of a DAML Optional Text field. +// Returns "" for None or when the field is absent. +func (e *LedgerEvent) OptionalTextField(name string) string { + return values.OptionalText(e.fields[name]) +} + +// OptionalPartyField returns the inner Party value of a DAML Optional Party field. +// Returns "" for None or when the field is absent. +func (e *LedgerEvent) OptionalPartyField(name string) string { + return values.OptionalParty(e.fields[name]) +} + +// IsNone returns true if the named DAML Optional field holds None. +func (e *LedgerEvent) IsNone(name string) bool { + return values.IsNone(e.fields[name]) +} + +// TimestampField returns the named DAML Time field as a Go time.Time. +// Returns zero time when the field is absent or not of type Timestamp. +func (e *LedgerEvent) TimestampField(name string) time.Time { + return values.Timestamp(e.fields[name]) +} + +// NestedTextField accesses a Text sub-field inside a named DAML Record field. 
+// Example: event.NestedTextField("instrumentId", "id") +// Returns "" when the outer field is absent or not a Record. +func (e *LedgerEvent) NestedTextField(record, field string) string { + return values.NestedTextField(e.fields[record], field) +} + +// NestedPartyField accesses a Party sub-field inside a named DAML Record field. +// Example: event.NestedPartyField("instrumentId", "admin") +// Returns "" when the outer field is absent or not a Record. +func (e *LedgerEvent) NestedPartyField(record, field string) string { + return values.NestedPartyField(e.fields[record], field) +} + +// OptionalMetaLookup looks up a string key within an Optional Metadata field. +// Metadata is encoded as Optional(Record{values: Map Text Text}). +// Returns "" when the Optional is None, the key is absent, or the field is absent. +func (e *LedgerEvent) OptionalMetaLookup(metaField, key string) string { + inner := values.OptionalRecordFields(e.fields[metaField]) + if inner == nil { + return "" + } + return values.MapLookupText(inner["values"], key) +} diff --git a/pkg/cantonsdk/values/decode.go b/pkg/cantonsdk/values/decode.go index 2abb1fed..48d7c80a 100644 --- a/pkg/cantonsdk/values/decode.go +++ b/pkg/cantonsdk/values/decode.go @@ -111,3 +111,70 @@ func Timestamp(v *lapiv2.Value) time.Time { } return time.Time{} } + +// RecordField extracts a named field from a Record value, returning the sub-map. +// Returns nil when v is nil or not a Record. +func RecordField(v *lapiv2.Value) map[string]*lapiv2.Value { + if v == nil { + return nil + } + r, ok := v.Sum.(*lapiv2.Value_Record) + if !ok || r.Record == nil { + return nil + } + return RecordToMap(r.Record) +} + +// NestedTextField accesses a Text field within a nested DAML Record value. +// Use this for fields like instrumentId.id where instrumentId is a Record. +// Returns "" when v is nil, not a Record, or the field is absent. 
+func NestedTextField(v *lapiv2.Value, field string) string { + return Text(RecordField(v)[field]) +} + +// NestedPartyField accesses a Party field within a nested DAML Record value. +// Use this for fields like instrumentId.admin. +// Returns "" when v is nil, not a Record, or the field is absent. +func NestedPartyField(v *lapiv2.Value, field string) string { + return Party(RecordField(v)[field]) +} + +// OptionalRecordFields extracts the inner Record fields from an Optional(Record) value. +// Returns nil when v is None or the inner value is not a Record. +func OptionalRecordFields(v *lapiv2.Value) map[string]*lapiv2.Value { + if IsNone(v) { + return nil + } + opt, ok := v.Sum.(*lapiv2.Value_Optional) + if !ok || opt.Optional == nil || opt.Optional.Value == nil { + return nil + } + return RecordField(opt.Optional.Value) +} + +// MapLookupText looks up a string key in a DAML Map Text Text value. +// Handles both TextMap (DA.TextMap) and GenMap (DA.Map) encodings. +// Returns "" when v is nil, not a map, or the key is absent. 
+func MapLookupText(v *lapiv2.Value, key string) string { + if v == nil { + return "" + } + // DA.TextMap.TextMap serialises as Value_TextMap + if tm, ok := v.Sum.(*lapiv2.Value_TextMap); ok && tm.TextMap != nil { + for _, e := range tm.TextMap.Entries { + if e.GetKey() == key { + return Text(e.GetValue()) + } + } + return "" + } + // DA.Map.Map serialises as Value_GenMap with Text keys + if gm, ok := v.Sum.(*lapiv2.Value_GenMap); ok && gm.GenMap != nil { + for _, e := range gm.GenMap.Entries { + if Text(e.GetKey()) == key { + return Text(e.GetValue()) + } + } + } + return "" +} diff --git a/pkg/indexer/fetcher.go b/pkg/indexer/fetcher.go new file mode 100644 index 00000000..bf0fb43f --- /dev/null +++ b/pkg/indexer/fetcher.go @@ -0,0 +1,85 @@ +package indexer + +import ( + "context" + "sync/atomic" + + "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" + + "go.uber.org/zap" +) + +const txChannelCap = 100 + +// Fetcher opens a live Canton stream from a caller-supplied resume offset and +// exposes the resulting transactions via Events. +// +// Typical usage: +// +// f := indexer.NewFetcher(streamClient, templateID, logger) +// f.Start(ctx, lastProcessedOffset) +// for tx := range f.Events() { ... } +type Fetcher struct { + stream streaming.Streamer + templateID streaming.TemplateID + out chan *streaming.LedgerTransaction + logger *zap.Logger +} + +// NewFetcher creates a new Fetcher. +// +// - stream: Canton streaming client (handles reconnection, auth, backoff) +// - templateID: DAML template to subscribe to (e.g. TokenTransferEvent) +// - logger: caller-provided logger +func NewFetcher(stream streaming.Streamer, templateID streaming.TemplateID, logger *zap.Logger) *Fetcher { + return &Fetcher{ + stream: stream, + templateID: templateID, + out: make(chan *streaming.LedgerTransaction, txChannelCap), + logger: logger, + } +} + +// Start begins streaming from offset in a background goroutine. It is non-blocking. 
+// The goroutine exits when ctx is cancelled or the underlying stream closes. +// +// Start must be called exactly once before Events is used. +func (f *Fetcher) Start(ctx context.Context, offset int64) { + f.logger.Info("fetcher starting", zap.Int64("resume_offset", offset)) + + // lastOffset is updated atomically by the streaming.Client goroutine as + // transactions arrive, and read back by its reconnect loop on each new + // connection attempt, ensuring exactly-once resumption from the right point. + var lastOffset int64 + atomic.StoreInt64(&lastOffset, offset) + + txCh := f.stream.Subscribe(ctx, streaming.SubscribeRequest{ + FromOffset: offset, + TemplateIDs: []streaming.TemplateID{f.templateID}, + }, &lastOffset) + + go func() { + defer close(f.out) + for { + select { + case tx, ok := <-txCh: + if !ok { + return + } + select { + case f.out <- tx: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() +} + +// Events returns the read-only channel of LedgerTransactions. +// Must be called after Start. The channel is closed when the stream terminates. +func (f *Fetcher) Events() <-chan *streaming.LedgerTransaction { + return f.out +} diff --git a/pkg/indexer/parser.go b/pkg/indexer/parser.go new file mode 100644 index 00000000..3d21d1f5 --- /dev/null +++ b/pkg/indexer/parser.go @@ -0,0 +1,167 @@ +package indexer + +import ( + "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" + + "go.uber.org/zap" +) + +const ( + tokenTransferEventModule = "CIP56.Events" + tokenTransferEventEntity = "TokenTransferEvent" + + // Metadata keys for bridge context stored in TokenTransferEvent.meta.values. + metaKeyExternalTxID = "bridge.externalTxId" + metaKeyExternalAddress = "bridge.externalAddress" + metaKeyFingerprint = "bridge.fingerprint" +) + +// Parser decodes streaming.LedgerTransactions into ParsedEvents. +// +// Filtering operates at two distinct layers: +// +// 1. 
gRPC (template-level): the Fetcher subscribes to CIP56.Events.TokenTransferEvent +// via TemplateID, reducing network traffic to only that contract type. This is done +// at the Canton Ledger API level and cannot filter by instrument payload. +// PackageID="" in the TemplateID enables all-packages mode, so any third-party +// CIP56-compliant token is automatically included at this layer. +// +// 2. App-level (instrument-level): the Parser further filters by InstrumentKey{Admin, ID}. +// This is necessary because the gRPC API cannot filter by contract field values. +// InstrumentKey is the Canton equivalent of an ERC-20 contract address — it uniquely +// identifies a specific token deployment by its issuer party and token identifier. +type Parser struct { + mode FilterMode + allowedInstruments map[InstrumentKey]struct{} + logger *zap.Logger +} + +// NewParser creates a new Parser. +// +// - mode: FilterModeAll or FilterModeWhitelist. +// - allowedInstruments: InstrumentKeys to accept (Canton equivalent of ERC-20 contract addresses). +// Each key is {Admin: issuerPartyID, ID: tokenID}. Both fields must match. +// Ignored when mode is FilterModeAll. +// - logger: caller-provided logger. +func NewParser(mode FilterMode, allowedInstruments []InstrumentKey, logger *zap.Logger) *Parser { + allowed := make(map[InstrumentKey]struct{}, len(allowedInstruments)) + for _, k := range allowedInstruments { + allowed[k] = struct{}{} + } + return &Parser{ + mode: mode, + allowedInstruments: allowed, + logger: logger, + } +} + +// Parse extracts and decodes all TokenTransferEvent created-events from tx. +// Returns one ParsedEvent per matched event; events that do not match the template, +// fail the instrument filter, or contain an invalid party combination are dropped. 
+func (p *Parser) Parse(tx *streaming.LedgerTransaction) []*ParsedEvent { + out := make([]*ParsedEvent, 0, len(tx.Events)) + + for _, ev := range tx.Events { + if !ev.IsCreated { + continue // archived events carry no field data — nothing to index + } + if ev.ModuleName != tokenTransferEventModule || ev.TemplateName != tokenTransferEventEntity { + continue + } + + instrumentID := ev.NestedTextField("instrumentId", "id") + instrumentAdmin := ev.NestedPartyField("instrumentId", "admin") + key := InstrumentKey{Admin: instrumentAdmin, ID: instrumentID} + + if !p.instrumentAllowed(key) { + p.logger.Debug("skipping event for unlisted instrument", + zap.String("instrument_id", instrumentID), + zap.String("instrument_admin", instrumentAdmin), + zap.String("contract_id", ev.ContractID), + ) + continue + } + + pe := p.decode(tx, ev, instrumentID) + if pe == nil { + continue + } + out = append(out, pe) + } + + return out +} + +// decode converts a single TokenTransferEvent LedgerEvent into a ParsedEvent. +// Returns nil when the event contains an invalid party combination (both absent). 
+func (p *Parser) decode(tx *streaming.LedgerTransaction, ev *streaming.LedgerEvent, instrumentID string) *ParsedEvent { + fromPartyID := optionalParty(ev, "fromParty") + toPartyID := optionalParty(ev, "toParty") + + var et EventType + switch { + case fromPartyID == nil && toPartyID != nil: + et = EventMint + case fromPartyID != nil && toPartyID == nil: + et = EventBurn + case fromPartyID != nil && toPartyID != nil: + et = EventTransfer + default: + p.logger.Warn("dropping TokenTransferEvent with both parties absent", + zap.String("contract_id", ev.ContractID), + zap.String("tx_id", tx.UpdateID), + zap.String("instrument_id", instrumentID), + ) + return nil + } + + return &ParsedEvent{ + InstrumentID: instrumentID, + InstrumentAdmin: ev.NestedPartyField("instrumentId", "admin"), + Issuer: ev.PartyField("issuer"), + EventType: et, + Amount: ev.NumericField("amount"), + FromPartyID: fromPartyID, + ToPartyID: toPartyID, + ExternalTxID: optionalMeta(ev, metaKeyExternalTxID), + ExternalAddress: optionalMeta(ev, metaKeyExternalAddress), + Fingerprint: optionalMeta(ev, metaKeyFingerprint), + ContractID: ev.ContractID, + TxID: tx.UpdateID, + LedgerOffset: tx.Offset, + Timestamp: ev.TimestampField("timestamp"), + EffectiveTime: tx.EffectiveTime, + } +} + +// instrumentAllowed returns true when the InstrumentKey passes the filter. +func (p *Parser) instrumentAllowed(key InstrumentKey) bool { + if p.mode == FilterModeAll { + return true + } + _, ok := p.allowedInstruments[key] + return ok +} + +// optionalParty extracts a DAML Optional Party field as *string. +// Returns nil when the field is None. +func optionalParty(ev *streaming.LedgerEvent, name string) *string { + if ev.IsNone(name) { + return nil + } + v := ev.OptionalPartyField(name) + if v == "" { + return nil + } + return &v +} + +// optionalMeta looks up a bridge metadata key and returns a *string. +// Returns nil when meta is None or the key is absent. 
+func optionalMeta(ev *streaming.LedgerEvent, key string) *string { + v := ev.OptionalMetaLookup("meta", key) + if v == "" { + return nil + } + return &v +} diff --git a/pkg/indexer/types.go b/pkg/indexer/types.go new file mode 100644 index 00000000..12e3fc5e --- /dev/null +++ b/pkg/indexer/types.go @@ -0,0 +1,87 @@ +package indexer + +import "time" + +// EventType classifies a TokenTransferEvent as MINT, BURN, or TRANSFER. +// Derived from the fromParty/toParty Optional fields — mirrors ERC-20 Transfer semantics: +// +// MINT: fromParty = None, toParty = Some(recipient) +// BURN: fromParty = Some(owner), toParty = None +// TRANSFER: fromParty = Some(sender), toParty = Some(receiver) +type EventType string + +const ( + EventMint EventType = "MINT" + EventBurn EventType = "BURN" + EventTransfer EventType = "TRANSFER" +) + +// ParsedEvent is a fully decoded TokenTransferEvent ready for the processor. +// +// Fields map directly to the DAML TokenTransferEvent template in CIP56.Events: +// +// issuer → Issuer +// instrumentId → InstrumentID (id field) + InstrumentAdmin (admin field) +// fromParty → FromPartyID (*string, nil for mints) +// toParty → ToPartyID (*string, nil for burns) +// amount → Amount (decimal string) +// timestamp → Timestamp (contract-level time, from the DAML event) +// meta.values → ExternalTxID, ExternalAddress, Fingerprint (bridge context, nil for transfers) +// +// ContractID (the TokenTransferEvent contract ID) is the idempotency key used +// as event_id in the store — guaranteed unique across the ledger. +// +// Primary identity throughout is canton_party_id — no EVM address at this layer. +type ParsedEvent struct { + // Instrument identification — fully qualified by both fields. + InstrumentID string // instrumentId.id — token identifier (e.g. "DEMO", "PROMPT") + InstrumentAdmin string // instrumentId.admin — token admin/issuer party + + // Issuer of the TokenTransferEvent contract (the token config issuer). 
+ Issuer string + + // Transfer semantics, mirroring ERC-20 Transfer(from, to, value). + EventType EventType + Amount string // decimal string, e.g. "1.500000000000000000" + FromPartyID *string // nil for mints + ToPartyID *string // nil for burns + + // Bridge audit context extracted from meta.values (nil for native peer-to-peer transfers). + ExternalTxID *string // meta["bridge.externalTxId"] — EVM transaction hash + ExternalAddress *string // meta["bridge.externalAddress"] — EVM destination address + Fingerprint *string // meta["bridge.fingerprint"] — user fingerprint + + // Provenance. + ContractID string // TokenTransferEvent contract ID — idempotency key (event_id in store) + TxID string // Ledger transaction UpdateId + LedgerOffset int64 // Ledger offset of the containing transaction + Timestamp time.Time // Contract-level time from TokenTransferEvent.timestamp + EffectiveTime time.Time // Ledger transaction effective time +} + +// InstrumentKey is the Canton equivalent of an ERC-20 contract address. +// It uniquely identifies a CIP56 token deployment. +// Corresponds to the DAML InstrumentId{admin: Party, id: Text} record. +// +// instrumentId.id alone is NOT unique — two different issuers can both deploy +// a token with id="DEMO". The full {Admin, ID} pair IS unique and is the correct +// key for whitelisting specific token deployments. +type InstrumentKey struct { + Admin string // instrumentId.admin — the token admin/issuer party + ID string // instrumentId.id — the token identifier (e.g. "DEMO") +} + +// FilterMode controls which token instruments the Parser processes. +type FilterMode int + +const ( + // FilterModeAll indexes events from every instrument — equivalent to a global + // ERC-20 Transfer log covering all CIP56 token deployments visible to the indexer. + FilterModeAll FilterMode = iota + + // FilterModeWhitelist indexes only events whose InstrumentKey{Admin, ID} is in + // the allowed set. 
Use this for an operator who manages a fixed set of tokens. + // Both Admin and ID must match — this is the Canton equivalent of whitelisting + // by ERC-20 contract address. + FilterModeWhitelist +) From 52e6403957cff7216fc8f28a01158888b4a8a628 Mon Sep 17 00:00:00 2001 From: sadiq1971 Date: Wed, 18 Mar 2026 02:55:33 +0600 Subject: [PATCH 2/5] feat: indexer refactored --- indexer-design.md | 1699 +++++++++++++++++ pkg/cantonsdk/streaming/builder.go | 68 + pkg/cantonsdk/streaming/client.go | 4 +- pkg/cantonsdk/streaming/stream.go | 62 + pkg/cantonsdk/streaming/types.go | 19 + pkg/cantonsdk/values/decode.go | 4 +- pkg/indexer/engine/decoder.go | 127 ++ pkg/indexer/engine/decoder_test.go | 248 +++ pkg/indexer/{ => engine}/fetcher.go | 40 +- .../engine/mocks/mock_event_fetcher.go | 121 ++ pkg/indexer/engine/mocks/mock_store.go | 335 ++++ pkg/indexer/engine/processor.go | 203 ++ pkg/indexer/engine/processor_test.go | 338 ++++ pkg/indexer/parser.go | 167 -- pkg/indexer/store.go | 47 + pkg/indexer/types.go | 47 + 16 files changed, 3342 insertions(+), 187 deletions(-) create mode 100644 indexer-design.md create mode 100644 pkg/cantonsdk/streaming/builder.go create mode 100644 pkg/cantonsdk/streaming/stream.go create mode 100644 pkg/indexer/engine/decoder.go create mode 100644 pkg/indexer/engine/decoder_test.go rename pkg/indexer/{ => engine}/fetcher.go (56%) create mode 100644 pkg/indexer/engine/mocks/mock_event_fetcher.go create mode 100644 pkg/indexer/engine/mocks/mock_store.go create mode 100644 pkg/indexer/engine/processor.go create mode 100644 pkg/indexer/engine/processor_test.go delete mode 100644 pkg/indexer/parser.go create mode 100644 pkg/indexer/store.go diff --git a/indexer-design.md b/indexer-design.md new file mode 100644 index 00000000..07b9d44c --- /dev/null +++ b/indexer-design.md @@ -0,0 +1,1699 @@ +# Canton ERC-20 Indexer — Design Document + +> **Status:** Design / Pre-Implementation +> **CIP Reference:** CIP-0086 (ERC-20 Middleware & Distributed Indexer) 
+> **Scope:** Phase 1 — CIP-56 token indexer (DEMO + PROMPT); no Canton Coin yet + +--- + +## Table of Contents + +1. [Background & Motivation](#1-background--motivation) +2. [Current State & Gaps](#2-current-state--gaps) +3. [Key Design Questions Answered](#3-key-design-questions-answered) +4. [Architecture Overview](#4-architecture-overview) +5. [DAML Contract Change — Unified `TokenTransferEvent`](#5-daml-contract-change--unified-tokentransferevent) +6. [Component Deep-Dive](#6-component-deep-dive) + - 6.1 [cantonsdk Streaming Client](#61-cantonsdk-streaming-client-new-package) + - 6.2 [Fetcher](#62-fetcher) + - 6.3 [Parser](#63-parser) + - 6.4 [Processor](#64-processor) + - 6.5 [Store — Models & PostgreSQL](#65-store--models--postgresql) + - 6.6 [Database Migrations (Go code)](#66-database-migrations-go-code) + - 6.7 [Service Layer](#67-service-layer) + - 6.8 [API / HTTP Layer](#68-api--http-layer) +7. [File & Directory Layout](#7-file--directory-layout) +8. [Pseudo-code & Data Flows](#8-pseudo-code--data-flows) +9. [Configuration](#9-configuration) +10. [Integration with API Server](#10-integration-with-api-server) +11. [Open Questions & Future Work](#11-open-questions--future-work) + +--- + +## 1. Background & Motivation + +CIP-0086 mandates a **distributed indexer** that aggregates Canton token state and exposes +ERC-20-compatible HTTP endpoints. 
The current `reconciler` in `pkg/reconciler/` is a +periodic polling loop (snapshot-based) that: + +- Queries all `CIP56Holding` active contracts every N seconds +- Aggregates per-party balances — only current state, no history +- Tracks only bridge (mint/burn) events via `bridge_events` table +- Misses transfers made directly on Canton (visible only via holdings snapshot) + +**What the reconciler lacks:** +- Continuous streaming — balance lag between polls, events missed +- Transfer event history — can't answer "show me all transfers for party X" +- Resumability — replays from scratch on restart +- Independent HTTP query API +- Scalability — hard-coded to DEMO/PROMPT package IDs + +The indexer is a **separate, independent binary** (`cmd/indexer`) with no dependency on +the api-server's user table or user registration flow. It is Canton-native: it speaks +`canton_party_id` as its primary identity. EVM address mapping is the api-server's +responsibility, not the indexer's. + +--- + +## 2. Current State & Gaps + +``` +Current Architecture (reconciler, inside api-server process): + + StartPeriodicReconciliation(interval) + │ + ▼ every N seconds + ReconcileAll() + ├── GetAllHoldings() → StateService.GetActiveContracts() (snapshot) + ├── SetBalanceByCantonPartyID() + ├── SetTotalSupply() + ├── GetMintEvents() → active contract query (no streaming) + └── GetBurnEvents() → "Transfers are internal Canton operations, not tracked" + + PostgreSQL: user_token_balances, bridge_events, token_metrics + +Gaps: + ✗ No transfer history — only current balance + ✗ Balance lag between reconcile intervals + ✗ Not resumable (no ledger offset checkpoint) + ✗ No independent HTTP query API + ✗ Restarts replay from offset 0 + ✗ Coupled to api-server process and userstore +``` + +--- + +## 3. Key Design Questions Answered + +### Q1: Use / extend cantonsdk for the fetcher? 
+ +**Yes — add `pkg/cantonsdk/streaming/` as a new generic streaming package.** + +The existing `pkg/cantonsdk/bridge/client.go` already uses `UpdateService.GetUpdates` +(gRPC server-streaming) inside `StreamWithdrawalEvents`, with exponential backoff +reconnect, auth token invalidation on 401, and offset resumption. The new package +formalises this pattern as a reusable, generic streaming client. The indexer fetcher +delegates entirely to it. + +**WebSocket note:** Canton's gRPC API does not support WebSocket. The Canton→indexer +connection is always gRPC HTTP/2 server-streaming. WebSocket is a Phase 2 option for the +indexer→client direction (real-time event subscriptions). + +### Q2: Add `TransferEvent`? Use a unified event for all cases? + +**Yes — add a single `TokenTransferEvent` DAML template covering MINT, BURN, TRANSFER.** + +This mirrors ERC-20's `Transfer(address indexed from, address indexed to, uint256 value)`: + +- **MINT**: `fromParty = None` +- **BURN**: `toParty = None` +- **TRANSFER**: both set + +The indexer subscribes to **only this one template** — no inference heuristics, no holding +lifecycle correlation. Clean, deterministic, ERC-20 idiomatic. + +**Does it violate Canton privacy?** No. The observer pattern is: +``` +signatory issuer ← indexer runs as issuer, sees all events +observer fromParty, toParty, auditObservers +``` +Identical to the existing `MintEvent` / `BurnEvent` pattern. Parties only see events they +are party to. The indexer (as issuer) has full visibility — same as the current +reconciler. Existing `MintEvent` / `BurnEvent` are kept for backward compatibility. + +**No return type changes needed.** In DAML, `create` inside a choice is a side-effect; +the new event is emitted without touching existing choice signatures. + +### Q3: Why does the indexer NOT depend on userstore? + +**The indexer is a Canton-native service. 
Its primary identity is `canton_party_id`.**
+
+The EVM address → party_id mapping is a concern of the api-server, not the indexer.
+Coupling the indexer to `userstore` would:
+- Make it non-deployable independently (always needs api-server's DB schema)
+- Break the separation of concerns (indexer = ledger aggregator, not user registry)
+- Prevent it from serving non-EVM Canton parties in the future
+
+**How callers query the indexer without userstore:**
+
+| Caller | Flow |
+|--------|------|
+| **api-server** | Resolves EVM address → `canton_party_id` via its own userstore, then calls indexer with a JWT whose claims contain `canton_party_id`. The indexer never sees the EVM address. |
+| **Direct client** (wallet, dApp) | Client sends a JWT issued by the api-server (or auth server) that contains `canton_party_id`. Indexer validates JWT, extracts `canton_party_id`, scopes query. |
+| **Public queries** | `totalSupply`, token metadata — no auth, no party resolution needed. |
+
+**The indexer's auth contract:** Validate the JWT signature against the shared JWKS
+endpoint and the standard registered claims (`exp`, `nbf`, `iss`, `aud`). Extract
+`canton_party_id` from claims. Scope all queries to that party. Done.
+
+### Q4: Separate admin vs. user API?
+
+**No — two tiers: public and authenticated (by JWT party_id).**
+
+In ERC-20, `totalSupply()`, `name()`, `symbol()`, `decimals()` are public. The indexer
+follows the same model. An admin tier can be added later if needed.
+
+---
+
+## 4. 
Architecture Overview + +``` +┌───────────────────────────────────────────────────────────────────────────┐ +│ cmd/indexer (binary) │ +│ (entry → pkg/app/indexer/server.go) │ +│ │ +│ ┌──────────────────────────────────────────────────────────────────────┐ │ +│ │ pkg/cantonsdk/streaming (NEW — reusable across the project) │ │ +│ │ StreamingClient.Subscribe(templateIDs, fromOffset) │ │ +│ │ → UpdateService.GetUpdates (gRPC server-streaming) │ │ +│ │ → exponential backoff reconnect (mirrors StreamWithdrawalEvents) │ │ +│ └──────────────────────────────┬───────────────────────────────────────┘ │ +│ │ chan LedgerTransaction │ +│ ┌───────────────────────────────▼─────────────────────────────────────┐ │ +│ │ pkg/indexer/fetcher │ │ +│ │ loads checkpoint from DB → delegates to cantonsdk/streaming │ │ +│ └───────────────────────────────┬─────────────────────────────────────┘ │ +│ │ chan RawTransaction │ +│ ┌────────────────────────────────▼────────────────────────────────────┐ │ +│ │ pkg/indexer/parser │ │ +│ │ decode TokenTransferEvent → classify MINT | BURN | TRANSFER │ │ +│ │ apply package whitelist filter │ │ +│ └────────────────────────────────┬────────────────────────────────────┘ │ +│ │ chan []ParsedEvent (per tx) │ +│ ┌─────────────────────────────────▼───────────────────────────────────┐ │ +│ │ pkg/indexer/processor │ │ +│ │ BEGIN TX │ │ +│ │ INSERT transfer_events (idempotent via event_id UNIQUE) │ │ +│ │ UPSERT token_balances (±delta by canton_party_id) │ │ +│ │ UPSERT token_stats (total supply) │ │ +│ │ UPDATE ledger_checkpoints │ │ +│ │ COMMIT ← checkpoint committed atomically with events │ │ +│ └─────────────────────────────────┬───────────────────────────────────┘ │ +│ │ │ +│ PostgreSQL │ +│ │ │ +│ ┌──────────────────────────────────▼──────────────────────────────────┐ │ +│ │ pkg/indexer/service (Canton-native query service) │ │ +│ │ All queries keyed by canton_party_id — no EVM address, no user │ │ +│ │ table. 
Caller is responsible for resolving EVM → party_id. │ │ +│ └──────────────────────────────────┬──────────────────────────────────┘ │ +│ │ │ +│ ┌───────────────────────────────────▼─────────────────────────────────┐ │ +│ │ pkg/indexer/api — HTTP :8082 (chi router) │ │ +│ │ │ │ +│ │ Auth: JWT validation only (shared JWKS with api-server) │ │ +│ │ Claims must contain canton_party_id. │ │ +│ │ No userstore. No EVM sig verification. │ │ +│ │ │ │ +│ │ [public] GET /v1/tokens │ │ +│ │ [public] GET /v1/tokens/{symbol} │ │ +│ │ [public] GET /v1/tokens/{symbol}/totalSupply │ │ +│ │ [JWT] GET /v1/balance/{partyID}[/{symbol}] │ │ +│ │ [JWT] GET /v1/transfers/{partyID}[/{symbol}] │ │ +│ │ [JWT] GET /v1/events/{partyID} │ │ +│ │ GET /health GET /metrics │ │ +│ │ (Phase 2: add /graph for GraphQL) │ │ +│ └─────────────────────────────────────────────────────────────────────┘ │ +└───────────────────────────────────────────────────────────────────────────┘ + ▲ ▲ + Canton Ledger API v2 Callers (api-server or direct clients) + gRPC server-streaming JWT must contain canton_party_id claim + +How api-server uses the indexer: + EVM client → api-server (EVM sig auth + userstore lookup) + → api-server mints JWT with canton_party_id claim + → api-server calls indexer /v1/balance/{partyID} with that JWT + ← indexer returns balance for that party + ← api-server returns result to EVM client +``` + +--- + +## 5. DAML Contract Change — Unified `TokenTransferEvent` + +### New template in `Events.daml` + +```daml +-- contracts/canton-erc20/daml/cip56-token/src/CIP56/Events.daml + +-- Unified transfer event covering mint, burn, and transfer. 
+-- Mirrors ERC-20 Transfer(from, to, value): +-- MINT: fromParty = None, toParty = Some recipient +-- BURN: fromParty = Some owner, toParty = None +-- TRANSFER: fromParty = Some sender, toParty = Some receiver +template TokenTransferEvent + with + issuer : Party + fromParty : Optional Party -- None for mints + toParty : Optional Party -- None for burns + amount : Decimal + tokenSymbol : Text + eventType : Text -- "MINT" | "BURN" | "TRANSFER" + timestamp : Time + evmTxHash : Optional Text -- bridge deposit tx hash (mints only) + evmDestination : Optional Text -- bridge withdrawal address (burns only) + userFingerprint : Optional Text -- EVM fingerprint, stored for bridge audit only + auditObservers : [Party] + where + signatory issuer + observer + optional [] (\p -> [p]) fromParty, + optional [] (\p -> [p]) toParty, + auditObservers +``` + +### Emit from `TokenConfig.IssuerMint` — no return type change + +```daml +-- Config.daml — inside IssuerMint do-block, AFTER creating MintEvent: + _ <- create TokenTransferEvent with + issuer + fromParty = None + toParty = Some recipient + amount + tokenSymbol = getSymbol meta + eventType = "MINT" + timestamp = eventTime + evmTxHash + evmDestination = None + userFingerprint = Some userFingerprint + auditObservers + pure (holdingCid, eventCid) -- return type UNCHANGED +``` + +### Emit from `TokenConfig.IssuerBurn` — no return type change + +```daml +-- Config.daml — inside IssuerBurn do-block, AFTER creating BurnEvent: + _ <- create TokenTransferEvent with + issuer + fromParty = Some holding.owner + toParty = None + amount + tokenSymbol = getSymbol meta + eventType = "BURN" + timestamp = eventTime + evmTxHash = None + evmDestination + userFingerprint = Some userFingerprint + auditObservers + pure (remainderCid, eventCid) -- return type UNCHANGED +``` + +### Emit from `CIP56TransferFactory.transferFactory_transferImpl` + +```daml +-- TransferFactory.daml — AFTER creating receiverCid: + _ <- create TokenTransferEvent with + issuer 
= admin + fromParty = Some sender + toParty = Some receiver + amount + tokenSymbol = instrumentId.id + eventType = "TRANSFER" + timestamp = now + evmTxHash = None + evmDestination = None + userFingerprint = None -- pure Canton transfer, no EVM context + auditObservers = [] + pure TransferInstructionResult with ... -- return type UNCHANGED +``` + +> Existing `MintEvent` and `BurnEvent` are kept intact for the reconciler and bridge +> relayer during the migration window. + +--- + +## 6. Component Deep-Dive + +### 6.1 cantonsdk Streaming Client (new package) + +Mirrors `StreamWithdrawalEvents` in `pkg/cantonsdk/bridge/client.go` exactly — same +backoff, same auth invalidation on 401, same reconnect-from-offset logic — but generic +enough for any template subscription. + +```go +// pkg/cantonsdk/streaming/client.go +package streaming + +// LedgerTransaction is a decoded, typed transaction from the GetUpdates stream. +type LedgerTransaction struct { + UpdateID string + Offset int64 + EffectiveTime time.Time + Events []LedgerEvent +} + +// LedgerEvent is a single created or archived contract within a transaction. +type LedgerEvent struct { + ContractID string + PackageID string + ModuleName string + TemplateName string + IsCreated bool + Created *lapiv2.CreatedEvent // set when IsCreated=true + Archived *lapiv2.ArchivedEvent // set when IsCreated=false +} + +// SubscribeRequest configures which templates to stream and from where. +type SubscribeRequest struct { + FromOffset int64 + TemplateIDs []*lapiv2.Identifier +} + +// Client wraps UpdateService.GetUpdates with reconnection and auth handling. +type Client struct { + ledger ledger.Ledger + party string +} + +func New(l ledger.Ledger, party string) *Client { + return &Client{ledger: l, party: party} +} + +// Subscribe opens a live stream against the Canton ledger. +// Reconnects automatically with exponential backoff (5s → 60s, mirrors bridge client). +// lastOffset is updated after each received transaction. 
The caller commits it to DB +// so reconnects resume from the last safe point. +func (c *Client) Subscribe( + ctx context.Context, + req SubscribeRequest, + lastOffset *int64, +) <-chan *LedgerTransaction { + out := make(chan *LedgerTransaction, 100) + go func() { + defer close(out) + backoff := newExponentialBackoff(5*time.Second, 60*time.Second) + for { + err := c.runStream(ctx, &req, lastOffset, out) + if ctx.Err() != nil { + return + } + // Reload offset — processor commits it to DB on each batch + atomic.StoreInt64(&req.FromOffset, atomic.LoadInt64(lastOffset)) + log.Warn("canton stream disconnected, reconnecting", + "err", err, "resume_offset", req.FromOffset) + backoff.Wait(ctx) + } + }() + return out +} + +func (c *Client) runStream( + ctx context.Context, + req *SubscribeRequest, + lastOffset *int64, + out chan<- *LedgerTransaction, +) error { + authCtx, err := c.ledger.AuthContext(ctx) + if err != nil { + return fmt.Errorf("auth: %w", err) + } + stream, err := c.ledger.Update().GetUpdates(authCtx, &lapiv2.GetUpdatesRequest{ + BeginExclusive: req.FromOffset, + UpdateFormat: &lapiv2.UpdateFormat{ + IncludeTransactions: &lapiv2.TransactionFormat{ + EventFormat: &lapiv2.EventFormat{ + FiltersByParty: map[string]*lapiv2.Filters{ + c.party: buildTemplateFilters(req.TemplateIDs), + }, + Verbose: true, + }, + TransactionShape: lapiv2.TransactionShape_TRANSACTION_SHAPE_ACS_DELTA, + }, + }, + }) + if err != nil { + if isAuthError(err) { + c.ledger.InvalidateToken() + } + return err + } + for { + resp, err := stream.Recv() + if err != nil { + if isAuthError(err) { + c.ledger.InvalidateToken() + } + return err + } + tx := resp.GetTransaction() + if tx == nil { + continue // checkpoint or topology event — skip + } + lt := decodeLedgerTransaction(tx) + atomic.StoreInt64(lastOffset, lt.Offset) + select { + case out <- lt: + case <-ctx.Done(): + return ctx.Err() + } + } +} +``` + +### 6.2 Fetcher + +Thin wrapper — loads checkpoint offset from DB, builds the template 
filter, delegates to +`cantonsdk/streaming`. No business logic here. + +```go +// pkg/indexer/fetcher/fetcher.go +package fetcher + +type Fetcher struct { + streaming *streaming.Client + store store.Store + templateID *lapiv2.Identifier // TokenTransferEvent fully-resolved ID + out chan<- *streaming.LedgerTransaction +} + +func New( + s *streaming.Client, + st store.Store, + tplID *lapiv2.Identifier, + out chan<- *streaming.LedgerTransaction, +) *Fetcher { + return &Fetcher{streaming: s, store: st, templateID: tplID, out: out} +} + +func (f *Fetcher) Start(ctx context.Context) error { + cp, err := f.store.GetCheckpoint(ctx) + if err != nil { + return fmt.Errorf("load checkpoint: %w", err) + } + var lastOffset int64 = cp.LastProcessedOffset + + events := f.streaming.Subscribe(ctx, streaming.SubscribeRequest{ + FromOffset: lastOffset, + TemplateIDs: []*lapiv2.Identifier{f.templateID}, + }, &lastOffset) + + for { + select { + case tx, ok := <-events: + if !ok { + return nil + } + select { + case f.out <- tx: + case <-ctx.Done(): + return ctx.Err() + } + case <-ctx.Done(): + return ctx.Err() + } + } +} +``` + +### 6.3 Parser + +Since the indexer subscribes only to `TokenTransferEvent`, parsing is a straightforward +DAML record decode using the existing `cantonsdk/values` helpers. No inference needed. + +```go +// pkg/indexer/parser/types.go +package parser + +type EventType string + +const ( + EventMint EventType = "MINT" + EventBurn EventType = "BURN" + EventTransfer EventType = "TRANSFER" +) + +// ParsedEvent is a fully decoded TokenTransferEvent, ready for the processor. +// Primary identity is always canton_party_id — no EVM address at this layer. +type ParsedEvent struct { + EventType EventType + TokenSymbol string + Amount string // decimal string, e.g. 
"1.5" + FromPartyID *string // nil for mints + ToPartyID *string // nil for burns + UserFingerprint *string // from DAML event — stored for bridge audit, not for auth + EVMTxHash *string // bridge deposit + EVMDestination *string // bridge withdrawal + ContractID string // unique idempotency key (TokenTransferEvent contract ID) + TxID string + LedgerOffset int64 + EffectiveTime time.Time +} +``` + +```go +// pkg/indexer/parser/cip56.go +package parser + +// Uses cantonsdk/values helpers (values.RecordToMap, values.Text, etc.) +// — same pattern as bridge/decode.go +func decodeTokenTransferEvent(ce *lapiv2.CreatedEvent, tx *streaming.LedgerTransaction) *ParsedEvent { + fields := values.RecordToMap(ce.CreateArguments) + + fromParty := optionalParty(fields["fromParty"]) + toParty := optionalParty(fields["toParty"]) + + var et EventType + switch { + case fromParty == nil && toParty != nil: + et = EventMint + case fromParty != nil && toParty == nil: + et = EventBurn + default: + et = EventTransfer + } + + amount, _ := values.Numeric(fields["amount"]) + return &ParsedEvent{ + EventType: et, + TokenSymbol: values.Text(fields["tokenSymbol"]), + Amount: amount.String(), + FromPartyID: fromParty, + ToPartyID: toParty, + UserFingerprint: optionalText(fields["userFingerprint"]), + EVMTxHash: optionalText(fields["evmTxHash"]), + EVMDestination: optionalText(fields["evmDestination"]), + ContractID: ce.ContractId, + TxID: tx.UpdateID, + LedgerOffset: tx.Offset, + EffectiveTime: tx.EffectiveTime, + } +} +``` + +### 6.4 Processor + +Atomic batch writer. Checkpoint update is inside the same DB transaction as the event +writes — guarantees exactly-once processing on restart. 
+ +```go +// pkg/indexer/processor/processor.go +package processor + +func (proc *Processor) processBatch(ctx context.Context, events []*parser.ParsedEvent) error { + if len(events) == 0 { + return nil + } + lastOffset := events[len(events)-1].LedgerOffset + + return proc.store.RunInTx(ctx, func(ctx context.Context, tx store.Tx) error { + for _, ev := range events { + if err := proc.processEvent(ctx, tx, ev); err != nil { + return fmt.Errorf("event %s: %w", ev.ContractID, err) + } + } + return tx.UpdateCheckpoint(ctx, lastOffset) + }) +} + +func (proc *Processor) processEvent(ctx context.Context, tx store.Tx, ev *parser.ParsedEvent) error { + // Idempotent insert — ON CONFLICT (event_id) DO NOTHING + inserted, err := tx.InsertTransferEvent(ctx, toTransferEventDao(ev)) + if err != nil { + return err + } + if !inserted { + return nil // already committed in a previous run + } + + switch ev.EventType { + case parser.EventMint: + if err := tx.IncrementBalance(ctx, *ev.ToPartyID, ev.TokenSymbol, ev.Amount); err != nil { + return err + } + return tx.IncrementTotalSupply(ctx, ev.TokenSymbol, ev.Amount) + + case parser.EventBurn: + if err := tx.DecrementBalance(ctx, *ev.FromPartyID, ev.TokenSymbol, ev.Amount); err != nil { + return err + } + return tx.DecrementTotalSupply(ctx, ev.TokenSymbol, ev.Amount) + + case parser.EventTransfer: + if err := tx.DecrementBalance(ctx, *ev.FromPartyID, ev.TokenSymbol, ev.Amount); err != nil { + return err + } + return tx.IncrementBalance(ctx, *ev.ToPartyID, ev.TokenSymbol, ev.Amount) + } + return nil +} +``` + +--- + +### 6.5 Store — Models & PostgreSQL + +#### `pkg/indexer/store/model.go` + +DAOs follow the exact Bun ORM pattern from `pkg/reconciler/store/model.go`. +**No EVM address in `TokenBalanceDao`** — the indexer is Canton-native. `evm_address` +is the api-server's concern. + +```go +// pkg/indexer/store/model.go +package store + +import ( + "time" + "github.com/uptrace/bun" +) + +// LedgerCheckpointDao — single-row table. 
Offset committed atomically with each +// processed batch, guaranteeing safe restart from this point. +type LedgerCheckpointDao struct { + bun.BaseModel `bun:"table:ledger_checkpoints,alias:lc"` + ID int `bun:"id,pk,default:1"` + LastProcessedOffset int64 `bun:"last_processed_offset,notnull,default:0"` + LastTxID *string `bun:"last_tx_id,type:varchar(255)"` + UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` +} + +// IndexedTokenDao — registry of token contracts being indexed. +type IndexedTokenDao struct { + bun.BaseModel `bun:"table:indexed_tokens,alias:it"` + PackageID string `bun:"package_id,pk,type:varchar(255)"` + TokenSymbol string `bun:"token_symbol,unique,notnull,type:varchar(50)"` + ModuleName string `bun:"module_name,notnull,type:varchar(255)"` + TemplateName string `bun:"template_name,notnull,type:varchar(255)"` + Name *string `bun:"name,type:varchar(255)"` + Decimals int16 `bun:"decimals,notnull,default:18"` + IssuerPartyID *string `bun:"issuer_party_id,type:varchar(255)"` + AddedAt time.Time `bun:"added_at,nullzero,default:current_timestamp"` +} + +// TransferEventDao — append-only event log. +// event_id = Canton contract ID of the TokenTransferEvent (globally unique). +// fingerprint is stored only because it comes from the DAML event itself (bridge audit). +// It is NOT used for auth or party resolution inside the indexer. 
+type TransferEventDao struct { + bun.BaseModel `bun:"table:transfer_events,alias:te"` + ID int64 `bun:"id,pk,autoincrement"` + EventID string `bun:"event_id,unique,notnull,type:varchar(512)"` + EventType string `bun:"event_type,notnull,type:varchar(20)"` // MINT|BURN|TRANSFER + TokenSymbol string `bun:"token_symbol,notnull,type:varchar(50)"` + Amount string `bun:"amount,notnull,type:numeric(38,18)"` + FromPartyID *string `bun:"from_party_id,type:varchar(255)"` // nil for mints + ToPartyID *string `bun:"to_party_id,type:varchar(255)"` // nil for burns + Fingerprint *string `bun:"fingerprint,type:varchar(128)"` // from DAML event + EVMTxHash *string `bun:"evm_tx_hash,type:varchar(255)"` + EVMDestination *string `bun:"evm_destination,type:varchar(42)"` + TransactionID *string `bun:"transaction_id,type:varchar(255)"` + LedgerOffset int64 `bun:"ledger_offset,notnull"` + EffectiveTime time.Time `bun:"effective_time,notnull"` + IndexedAt time.Time `bun:"indexed_at,nullzero,default:current_timestamp"` +} + +// TokenBalanceDao — incremental balance cache per party per token. +// Primary key: (party_id, token_symbol). +// NO evm_address — the indexer is Canton-native. EVM mapping is the api-server's job. +type TokenBalanceDao struct { + bun.BaseModel `bun:"table:token_balances,alias:tb"` + PartyID string `bun:"party_id,pk,type:varchar(255)"` + TokenSymbol string `bun:"token_symbol,pk,type:varchar(50)"` + Balance string `bun:"balance,notnull,default:0,type:numeric(38,18)"` + UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` +} + +// TokenStatDao — aggregate stats per token. 
+type TokenStatDao struct { + bun.BaseModel `bun:"table:token_stats,alias:ts"` + TokenSymbol string `bun:"token_symbol,pk,type:varchar(50)"` + TotalSupply string `bun:"total_supply,notnull,default:0,type:numeric(38,18)"` + HolderCount int64 `bun:"holder_count,notnull,default:0"` + UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` +} +``` + +#### `pkg/indexer/store/store.go` + +```go +// pkg/indexer/store/store.go +package store + +import "context" + +//go:generate mockery --name Store --output ./mocks +type Store interface { + GetCheckpoint(ctx context.Context) (*LedgerCheckpointDao, error) + // Queries are keyed by canton_party_id — no EVM address resolution here. + GetTokenBalance(ctx context.Context, partyID, tokenSymbol string) (*TokenBalanceDao, error) + GetTokenStat(ctx context.Context, tokenSymbol string) (*TokenStatDao, error) + ListIndexedTokens(ctx context.Context) ([]*IndexedTokenDao, error) + ListTransferEvents(ctx context.Context, filter TransferEventFilter) ([]*TransferEventDao, int, error) + UpsertIndexedToken(ctx context.Context, dao *IndexedTokenDao) error + RunInTx(ctx context.Context, fn func(ctx context.Context, tx Tx) error) error +} + +//go:generate mockery --name Tx --output ./mocks +type Tx interface { + InsertTransferEvent(ctx context.Context, dao *TransferEventDao) (inserted bool, err error) + IncrementBalance(ctx context.Context, partyID, tokenSymbol, amount string) error + DecrementBalance(ctx context.Context, partyID, tokenSymbol, amount string) error + IncrementTotalSupply(ctx context.Context, tokenSymbol, amount string) error + DecrementTotalSupply(ctx context.Context, tokenSymbol, amount string) error + UpdateCheckpoint(ctx context.Context, offset int64) error +} + +// TransferEventFilter — all fields keyed by canton_party_id, not EVM address. 
+type TransferEventFilter struct { + PartyID *string // filter events where from_party_id OR to_party_id = this + TokenSymbol *string + EventType *string + Page int + PageSize int +} +``` + +#### `pkg/indexer/store/pg.go` (key methods) + +```go +// pkg/indexer/store/pg.go +package store + +type pgStore struct{ db *bun.DB } + +func NewStore(db *bun.DB) Store { return &pgStore{db: db} } + +func (s *pgStore) GetTokenBalance(ctx context.Context, partyID, tokenSymbol string) (*TokenBalanceDao, error) { + dao := new(TokenBalanceDao) + err := s.db.NewSelect().Model(dao). + Where("party_id = ? AND token_symbol = ?", partyID, tokenSymbol). + Scan(ctx) + if errors.Is(err, sql.ErrNoRows) { + return &TokenBalanceDao{PartyID: partyID, TokenSymbol: tokenSymbol, Balance: "0"}, nil + } + return dao, err +} + +func (s *pgStore) ListTransferEvents(ctx context.Context, f TransferEventFilter) ([]*TransferEventDao, int, error) { + var rows []*TransferEventDao + q := s.db.NewSelect().Model(&rows).OrderExpr("ledger_offset DESC") + + if f.PartyID != nil { + q = q.Where("(from_party_id = ? OR to_party_id = ?)", *f.PartyID, *f.PartyID) + } + if f.TokenSymbol != nil { + q = q.Where("token_symbol = ?", *f.TokenSymbol) + } + if f.EventType != nil { + q = q.Where("event_type = ?", *f.EventType) + } + + total, err := q.Count(ctx) + if err != nil { + return nil, 0, fmt.Errorf("count events: %w", err) + } + + pageSize := f.PageSize + if pageSize <= 0 { pageSize = 20 } + page := f.Page + if page <= 0 { page = 1 } + + err = q.Limit(pageSize).Offset((page - 1) * pageSize).Scan(ctx) + return rows, total, err +} + +func (s *pgStore) RunInTx(ctx context.Context, fn func(ctx context.Context, tx Tx) error) error { + return s.db.RunInTx(ctx, nil, func(ctx context.Context, bunTx bun.Tx) error { + return fn(ctx, &pgTx{db: bunTx}) + }) +} + +type pgTx struct{ db bun.Tx } + +func (t *pgTx) InsertTransferEvent(ctx context.Context, dao *TransferEventDao) (bool, error) { + res, err := t.db.NewInsert().Model(dao). 
+ On("CONFLICT (event_id) DO NOTHING"). + Exec(ctx) + if err != nil { + return false, fmt.Errorf("insert transfer event: %w", err) + } + rows, _ := res.RowsAffected() + return rows > 0, nil +} + +func (t *pgTx) IncrementBalance(ctx context.Context, partyID, tokenSymbol, amount string) error { + _, err := t.db.NewInsert(). + TableExpr("token_balances"). + ColumnExpr("party_id, token_symbol, balance, updated_at"). + Value("?, ?, ?, NOW()", partyID, tokenSymbol, amount). + On("CONFLICT (party_id, token_symbol) DO UPDATE"). + Set("balance = token_balances.balance + EXCLUDED.balance"). + Set("updated_at = NOW()"). + Exec(ctx) + return err +} + +func (t *pgTx) DecrementBalance(ctx context.Context, partyID, tokenSymbol, amount string) error { + _, err := t.db.NewUpdate().TableExpr("token_balances"). + Set("balance = balance - ?", amount). + Set("updated_at = NOW()"). + Where("party_id = ? AND token_symbol = ?", partyID, tokenSymbol). + Exec(ctx) + return err +} + +func (t *pgTx) UpdateCheckpoint(ctx context.Context, offset int64) error { + _, err := t.db.NewUpdate().Model((*LedgerCheckpointDao)(nil)). + Set("last_processed_offset = ?", offset). + Set("updated_at = NOW()"). + Where("id = 1"). + Exec(ctx) + return err +} +``` + +--- + +### 6.6 Database Migrations (Go code) + +Package `indexerdb`, same pattern as `pkg/migrations/apidb/`. +Inline DAO structs per migration file keep migrations self-contained. 
+ +```go +// pkg/migrations/indexerdb/migrations.go +package indexerdb + +import "github.com/uptrace/bun/migrate" + +var Migrations = migrate.NewMigrations() +``` + +```go +// pkg/migrations/indexerdb/1_create_ledger_checkpoints.go +package indexerdb + +import ( + "context" + "log" + "time" + + mghelper "github.com/chainsafe/canton-middleware/pkg/pgutil/migrations" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister( + func(ctx context.Context, db *bun.DB) error { + log.Println("creating ledger_checkpoints table...") + type dao struct { + bun.BaseModel `bun:"table:ledger_checkpoints"` + ID int `bun:"id,pk,default:1"` + LastProcessedOffset int64 `bun:"last_processed_offset,notnull,default:0"` + LastTxID *string `bun:"last_tx_id,type:varchar(255)"` + UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` + } + if err := mghelper.CreateSchema(ctx, db, (*dao)(nil)); err != nil { + return err + } + _, err := db.ExecContext(ctx, + `INSERT INTO ledger_checkpoints (id, last_processed_offset) + VALUES (1, 0) ON CONFLICT DO NOTHING;`) + return err + }, + func(ctx context.Context, db *bun.DB) error { + log.Println("dropping ledger_checkpoints table...") + type dao struct { + bun.BaseModel `bun:"table:ledger_checkpoints"` + } + return mghelper.DropTables(ctx, db, (*dao)(nil)) + }, + ) +} +``` + +```go +// pkg/migrations/indexerdb/2_create_indexed_tokens.go +package indexerdb + +import ( + "context" + "log" + "time" + + mghelper "github.com/chainsafe/canton-middleware/pkg/pgutil/migrations" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister( + func(ctx context.Context, db *bun.DB) error { + log.Println("creating indexed_tokens table...") + type dao struct { + bun.BaseModel `bun:"table:indexed_tokens"` + PackageID string `bun:"package_id,pk,type:varchar(255)"` + TokenSymbol string `bun:"token_symbol,unique,notnull,type:varchar(50)"` + ModuleName string `bun:"module_name,notnull,type:varchar(255)"` + TemplateName 
string `bun:"template_name,notnull,type:varchar(255)"` + Name *string `bun:"name,type:varchar(255)"` + Decimals int16 `bun:"decimals,notnull,default:18"` + IssuerPartyID *string `bun:"issuer_party_id,type:varchar(255)"` + AddedAt time.Time `bun:"added_at,nullzero,default:current_timestamp"` + } + return mghelper.CreateSchema(ctx, db, (*dao)(nil)) + }, + func(ctx context.Context, db *bun.DB) error { + log.Println("dropping indexed_tokens table...") + type dao struct { + bun.BaseModel `bun:"table:indexed_tokens"` + } + return mghelper.DropTables(ctx, db, (*dao)(nil)) + }, + ) +} +``` + +```go +// pkg/migrations/indexerdb/3_create_transfer_events.go +package indexerdb + +import ( + "context" + "log" + "time" + + mghelper "github.com/chainsafe/canton-middleware/pkg/pgutil/migrations" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister( + func(ctx context.Context, db *bun.DB) error { + log.Println("creating transfer_events table...") + type dao struct { + bun.BaseModel `bun:"table:transfer_events"` + ID int64 `bun:"id,pk,autoincrement"` + EventID string `bun:"event_id,unique,notnull,type:varchar(512)"` + EventType string `bun:"event_type,notnull,type:varchar(20)"` + TokenSymbol string `bun:"token_symbol,notnull,type:varchar(50)"` + Amount string `bun:"amount,notnull,type:numeric(38,18)"` + FromPartyID *string `bun:"from_party_id,type:varchar(255)"` + ToPartyID *string `bun:"to_party_id,type:varchar(255)"` + Fingerprint *string `bun:"fingerprint,type:varchar(128)"` + EVMTxHash *string `bun:"evm_tx_hash,type:varchar(255)"` + EVMDestination *string `bun:"evm_destination,type:varchar(42)"` + TransactionID *string `bun:"transaction_id,type:varchar(255)"` + LedgerOffset int64 `bun:"ledger_offset,notnull"` + EffectiveTime time.Time `bun:"effective_time,notnull"` + IndexedAt time.Time `bun:"indexed_at,nullzero,default:current_timestamp"` + } + if err := mghelper.CreateSchema(ctx, db, (*dao)(nil)); err != nil { + return err + } + // Indexes: all events for a 
party (sent or received), fingerprint, bridge + if err := mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "from_party_id", "token_symbol"); err != nil { + return err + } + if err := mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "to_party_id", "token_symbol"); err != nil { + return err + } + if err := mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "fingerprint"); err != nil { + return err + } + if err := mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "evm_tx_hash"); err != nil { + return err + } + return mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "ledger_offset") + }, + func(ctx context.Context, db *bun.DB) error { + log.Println("dropping transfer_events table...") + type dao struct { + bun.BaseModel `bun:"table:transfer_events"` + } + return mghelper.DropTables(ctx, db, (*dao)(nil)) + }, + ) +} +``` + +```go +// pkg/migrations/indexerdb/4_create_token_balances.go +package indexerdb + +import ( + "context" + "log" + "time" + + mghelper "github.com/chainsafe/canton-middleware/pkg/pgutil/migrations" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister( + func(ctx context.Context, db *bun.DB) error { + log.Println("creating token_balances table...") + // NOTE: No evm_address column. The indexer is Canton-native. + // EVM address resolution is the api-server's responsibility. 
+ type dao struct { + bun.BaseModel `bun:"table:token_balances"` + PartyID string `bun:"party_id,pk,type:varchar(255)"` + TokenSymbol string `bun:"token_symbol,pk,type:varchar(50)"` + Balance string `bun:"balance,notnull,default:0,type:numeric(38,18)"` + UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` + } + if err := mghelper.CreateSchema(ctx, db, (*dao)(nil)); err != nil { + return err + } + return mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "token_symbol") + }, + func(ctx context.Context, db *bun.DB) error { + log.Println("dropping token_balances table...") + type dao struct { + bun.BaseModel `bun:"table:token_balances"` + } + return mghelper.DropTables(ctx, db, (*dao)(nil)) + }, + ) +} +``` + +```go +// pkg/migrations/indexerdb/5_create_token_stats.go +package indexerdb + +import ( + "context" + "log" + "time" + + mghelper "github.com/chainsafe/canton-middleware/pkg/pgutil/migrations" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister( + func(ctx context.Context, db *bun.DB) error { + log.Println("creating token_stats table...") + type dao struct { + bun.BaseModel `bun:"table:token_stats"` + TokenSymbol string `bun:"token_symbol,pk,type:varchar(50)"` + TotalSupply string `bun:"total_supply,notnull,default:0,type:numeric(38,18)"` + HolderCount int64 `bun:"holder_count,notnull,default:0"` + UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` + } + return mghelper.CreateSchema(ctx, db, (*dao)(nil)) + }, + func(ctx context.Context, db *bun.DB) error { + log.Println("dropping token_stats table...") + type dao struct { + bun.BaseModel `bun:"table:token_stats"` + } + return mghelper.DropTables(ctx, db, (*dao)(nil)) + }, + ) +} +``` + +--- + +### 6.7 Service Layer + +Canton-native query service. All methods keyed by `canton_party_id`. No userstore. +No EVM address. Mirrors `pkg/token/service.go` in structure. 
+

```go
// pkg/indexer/service/service.go
package service

import (
	"context"
	"fmt"

	"github.com/chainsafe/canton-middleware/pkg/indexer/store"
)

//go:generate mockery --name Service --output ./mocks
type Service interface {
	ListTokens(ctx context.Context) ([]*TokenInfo, error)
	GetTokenInfo(ctx context.Context, tokenSymbol string) (*TokenInfo, error)
	// All balance/history queries take canton_party_id as the primary identifier.
	// The caller (api-server) is responsible for resolving EVM address → party_id
	// before calling these methods.
	GetBalance(ctx context.Context, partyID, tokenSymbol string) (*Balance, error)
	GetTransferHistory(ctx context.Context, partyID string, filter TransferFilter) (*TransferPage, error)
}

type indexerService struct {
	store store.Store
}

func NewService(s store.Store) Service {
	return &indexerService{store: s}
}

func (s *indexerService) GetBalance(ctx context.Context, partyID, tokenSymbol string) (*Balance, error) {
	dao, err := s.store.GetTokenBalance(ctx, partyID, tokenSymbol)
	if err != nil {
		return nil, fmt.Errorf("get balance: %w", err)
	}
	token, err := s.store.GetTokenStat(ctx, tokenSymbol)
	if err != nil {
		return nil, fmt.Errorf("get token: %w", err)
	}
	return toBalance(dao, token), nil
}

func (s *indexerService) GetTransferHistory(ctx context.Context, partyID string, f TransferFilter) (*TransferPage, error) {
	rows, total, err := s.store.ListTransferEvents(ctx, store.TransferEventFilter{
		PartyID:     &partyID,
		TokenSymbol: f.TokenSymbol,
		EventType:   f.EventType,
		Page:        f.Page,
		PageSize:    f.PageSize,
	})
	if err != nil {
		return nil, fmt.Errorf("get transfer history: %w", err)
	}
	return toTransferPage(rows, total, f), nil
}
```

```go
// pkg/indexer/service/types.go
package service

type TokenInfo struct {
	Symbol      string `json:"symbol"`
	Name        string `json:"name"`
	Decimals    int    `json:"decimals"`
	TotalSupply string `json:"total_supply"`
	HolderCount int64 
`json:"holder_count"` +} + +// Balance is keyed by canton_party_id. The api-server maps EVM → party_id before +// calling the indexer and may re-map the response back to EVM context for its clients. +type Balance struct { + PartyID string `json:"party_id"` + TokenSymbol string `json:"token_symbol"` + Balance string `json:"balance"` // raw (18 decimals) + BalanceFormatted string `json:"balance_formatted"` // human readable + Decimals int `json:"decimals"` +} + +type TransferEvent struct { + EventID string `json:"event_id"` + EventType string `json:"event_type"` // MINT | BURN | TRANSFER + FromPartyID *string `json:"from_party_id"` + ToPartyID *string `json:"to_party_id"` + Amount string `json:"amount"` + AmountFormatted string `json:"amount_formatted"` + TokenSymbol string `json:"token_symbol"` + EVMTxHash *string `json:"evm_tx_hash,omitempty"` + LedgerOffset int64 `json:"ledger_offset"` + EffectiveTime string `json:"effective_time"` +} + +type TransferPage struct { + Total int `json:"total"` + Page int `json:"page"` + PageSize int `json:"page_size"` + Events []TransferEvent `json:"events"` +} + +type TransferFilter struct { + TokenSymbol *string + EventType *string + Page int + PageSize int +} +``` + +--- + +### 6.8 API / HTTP Layer + +**Auth: JWT only.** The JWT is issued by the api-server (or a shared auth service) after +authenticating the user via EVM signature. The JWT claims must contain `canton_party_id`. +The indexer validates the JWT signature against the shared JWKS endpoint and extracts +`canton_party_id` from claims — no userstore, no EVM signature verification here. + +**Endpoints use `partyID` as path param**, not EVM address. The api-server is the +translator between EVM world and Canton-native world. 
+ +```go +// pkg/indexer/api/server.go +package api + +func RegisterRoutes(r chi.Router, svc service.Service, cfg AuthConfig, logger *zap.Logger) { + h := newHandler(svc, logger) + + // Public — no auth (totalSupply is public per ERC-20 spec) + r.Get("/v1/tokens", h.listTokens) + r.Get("/v1/tokens/{symbol}", h.getToken) + r.Get("/v1/tokens/{symbol}/totalSupply", h.getTotalSupply) + + // JWT-authenticated — scoped to the party_id in the JWT claims + r.Group(func(r chi.Router) { + r.Use(authMiddleware(cfg)) + r.Get("/v1/balance/{partyID}", h.getBalance) + r.Get("/v1/balance/{partyID}/{symbol}", h.getBalanceBySymbol) + r.Get("/v1/transfers/{partyID}", h.getTransfers) + r.Get("/v1/transfers/{partyID}/{symbol}", h.getTransfersBySymbol) + r.Get("/v1/events/{partyID}", h.getTransfers) // alias + }) +} +``` + +```go +// pkg/indexer/api/middleware.go +package api + +// AuthConfig holds the JWKS URL for JWT validation. +// No userstore reference — the indexer does not know about EVM addresses. +type AuthConfig struct { + JWKSUrl string +} + +// Claims are extracted from the JWT. The JWT is issued by the api-server +// and must carry canton_party_id so the indexer can scope queries. +type Claims struct { + CantonPartyID string `json:"canton_party_id"` + // Other standard JWT fields (exp, iat, sub) handled by the JWT library +} + +type principalKey struct{} + +// authMiddleware validates the JWT and stores the party_id in context. +// Only JWT Bearer tokens are accepted — no EVM signature verification. 
+func authMiddleware(cfg AuthConfig) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + bearer := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ") + if bearer == "" { + writeError(w, http.StatusUnauthorized, errors.New("Bearer token required")) + return + } + claims, err := validateJWT(bearer, cfg.JWKSUrl) + if err != nil { + writeError(w, http.StatusUnauthorized, err) + return + } + if claims.CantonPartyID == "" { + writeError(w, http.StatusUnauthorized, + errors.New("JWT missing canton_party_id claim")) + return + } + ctx := context.WithValue(r.Context(), principalKey{}, claims.CantonPartyID) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + } +} + +// scopeCheck ensures the authenticated party can only read its own data. +func scopeCheck(r *http.Request, requestedPartyID string) error { + partyID, ok := r.Context().Value(principalKey{}).(string) + if !ok || partyID == "" { + return errors.New("unauthenticated") + } + if partyID != requestedPartyID { + return errors.New("access denied: can only query own party data") + } + return nil +} +``` + +```go +// pkg/indexer/api/handler.go +package api + +func (h *handler) getBalance(w http.ResponseWriter, r *http.Request) { + partyID := chi.URLParam(r, "partyID") + if err := scopeCheck(r, partyID); err != nil { + writeError(w, http.StatusForbidden, err) + return + } + symbol := chi.URLParam(r, "symbol") // may be "" for all-tokens variant + bal, err := h.svc.GetBalance(r.Context(), partyID, symbol) + if err != nil { + writeError(w, http.StatusInternalServerError, err) + return + } + writeJSON(w, http.StatusOK, bal) +} + +func (h *handler) getTransfers(w http.ResponseWriter, r *http.Request) { + partyID := chi.URLParam(r, "partyID") + if err := scopeCheck(r, partyID); err != nil { + writeError(w, http.StatusForbidden, err) + return + } + page, _ := strconv.Atoi(r.URL.Query().Get("page")) + pageSize, _ := 
strconv.Atoi(r.URL.Query().Get("page_size")) + symbol := r.URL.Query().Get("token") + evtType := r.URL.Query().Get("type") + + f := service.TransferFilter{Page: page, PageSize: pageSize} + if symbol != "" { f.TokenSymbol = &symbol } + if evtType != "" { f.EventType = &evtType } + + result, err := h.svc.GetTransferHistory(r.Context(), partyID, f) + if err != nil { + writeError(w, http.StatusInternalServerError, err) + return + } + writeJSON(w, http.StatusOK, result) +} +``` + +**How the api-server calls the indexer on behalf of an EVM client:** + +```go +// pkg/token/provider/indexer.go (new provider in api-server) +// The api-server resolves EVM → party_id, mints a short-lived JWT, calls indexer. + +func (p *IndexerProvider) GetBalance(ctx context.Context, tokenSymbol, fingerprint string) (string, error) { + // 1. Resolve fingerprint → canton_party_id via userstore (api-server's own DB) + user, err := p.userStore.GetUserByFingerprint(ctx, fingerprint) + if err != nil { + return "0", err + } + // 2. Mint a short-lived internal JWT with canton_party_id claim + jwt, err := p.jwtIssuer.IssuePartyJWT(*user.CantonPartyID) + if err != nil { + return "0", err + } + // 3. Call indexer HTTP API — indexer sees only party_id, never EVM address + return p.indexerClient.GetBalance(ctx, *user.CantonPartyID, tokenSymbol, jwt) +} +``` + +--- + +## 7. 
File & Directory Layout + +``` +canton-middleware/ +│ +├── cmd/ +│ ├── api-server/ existing +│ ├── relayer/ existing +│ └── indexer/ NEW +│ ├── main.go loads config → app/indexer.NewServer(cfg).Run() +│ └── migrate/ +│ └── main.go runs indexerdb migrations +│ +├── pkg/ +│ │ +│ ├── app/ +│ │ ├── api/ existing (api-server orchestrator) +│ │ └── indexer/ NEW (mirrors pkg/app/api/) +│ │ └── server.go wires streaming + fetcher + parser + +│ │ processor + service + HTTP server +│ │ +│ ├── cantonsdk/ +│ │ ├── bridge/ existing — unchanged +│ │ ├── token/ existing — unchanged +│ │ ├── ledger/ existing — unchanged +│ │ ├── lapi/v2/ existing — unchanged +│ │ └── streaming/ NEW — generic ledger streaming client +│ │ ├── client.go Subscribe(), runStream(), reconnect loop +│ │ └── types.go LedgerTransaction, LedgerEvent +│ │ +│ ├── indexer/ NEW — all indexer domain packages +│ │ │ +│ │ ├── fetcher/ +│ │ │ └── fetcher.go loads checkpoint → delegates to cantonsdk/streaming +│ │ │ +│ │ ├── parser/ +│ │ │ ├── parser.go routes LedgerTransaction → []ParsedEvent +│ │ │ ├── cip56.go decodeTokenTransferEvent() via cantonsdk/values +│ │ │ ├── whitelist.go ContractFilter, WhitelistFilter, AllFilter +│ │ │ └── types.go ParsedEvent, EventType (MINT/BURN/TRANSFER) +│ │ │ +│ │ ├── processor/ +│ │ │ └── processor.go atomic batch writer: events + balances + checkpoint +│ │ │ +│ │ ├── store/ +│ │ │ ├── model.go DAOs (no evm_address in TokenBalanceDao) +│ │ │ ├── store.go Store + Tx interfaces, TransferEventFilter +│ │ │ └── pg.go pgStore + pgTx (Bun ORM) +│ │ │ +│ │ ├── service/ +│ │ │ ├── service.go Service interface + impl, all methods by party_id +│ │ │ └── types.go TokenInfo, Balance, TransferEvent, TransferPage +│ │ │ +│ │ └── api/ HTTP layer (add graph/ here in Phase 2) +│ │ ├── server.go RegisterRoutes() on chi.Router +│ │ ├── handler.go listTokens, getBalance, getTransfers +│ │ ├── middleware.go authMiddleware (JWT only), scopeCheck +│ │ └── types.go JSON response types +│ │ +│ └── 
migrations/ +│ ├── apidb/ existing +│ └── indexerdb/ NEW +│ ├── migrations.go var Migrations = migrate.NewMigrations() +│ ├── 1_create_ledger_checkpoints.go +│ ├── 2_create_indexed_tokens.go +│ ├── 3_create_transfer_events.go +│ ├── 4_create_token_balances.go +│ └── 5_create_token_stats.go +│ +├── contracts/ +│ └── canton-erc20/daml/cip56-token/src/CIP56/ +│ ├── Events.daml MODIFIED — add TokenTransferEvent +│ ├── Config.daml MODIFIED — emit from IssuerMint/IssuerBurn +│ └── TransferFactory.daml MODIFIED — emit from transfer choice +│ +└── docs/ + ├── indexer-design.md this document + └── indexer-gh-issue.md GitHub issue (condensed) +``` + +--- + +## 8. Pseudo-code & Data Flows + +### Orchestrator — `pkg/app/indexer/server.go` + +```go +// pkg/app/indexer/server.go +package indexer + +type Server struct{ cfg *config.IndexerConfig } + +func NewServer(cfg *config.IndexerConfig) *Server { return &Server{cfg: cfg} } + +func (s *Server) Run() error { + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer stop() + + logger, _ := config.NewLogger(s.cfg.Logging) + defer logger.Sync() + + dbBun, err := pgutil.ConnectDB(&s.cfg.Database) + if err != nil { return err } + defer dbBun.Close() + + idxStore := indexerstore.NewStore(dbBun) + + ledgerClient, err := ledger.New(s.cfg.Canton) + if err != nil { return err } + defer ledgerClient.Close() + + streamClient := streaming.New(ledgerClient, s.cfg.Canton.IssuerParty) + + templateID := &lapiv2.Identifier{ + PackageId: s.cfg.Indexer.PackageID, + ModuleName: "CIP56", + EntityName: "TokenTransferEvent", + } + + var filter parser.ContractFilter + if len(s.cfg.Indexer.WhitelistedPackageIDs) > 0 { + filter = parser.NewWhitelistFilter(s.cfg.Indexer.WhitelistedPackageIDs) + } else { + filter = &parser.AllFilter{} + } + + txCh := make(chan *streaming.LedgerTransaction, 500) + parsedCh := make(chan []*parser.ParsedEvent, 100) + + f := fetcher.New(streamClient, idxStore, templateID, txCh) + p := 
parser.New(txCh, parsedCh, filter)
	proc := processor.New(idxStore, parsedCh)

	svc := indexerservice.NewService(idxStore) // no userstore dependency
	r := s.setupRouter(svc, logger)
	go apphttp.ServeAndWait(ctx, r, logger, &s.cfg.Query.Server)

	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error { return f.Start(ctx) })
	g.Go(func() error { return p.Start(ctx) })
	g.Go(func() error { return proc.Start(ctx) })

	logger.Info("indexer running")
	return g.Wait()
}

func (s *Server) setupRouter(svc indexerservice.Service, logger *zap.Logger) chi.Router {
	r := chi.NewRouter()
	r.Use(middleware.RequestID, middleware.RealIP, middleware.Recoverer)
	r.Use(middleware.Timeout(60 * time.Second))
	r.Get("/health", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("OK"))
	})
	indexerapi.RegisterRoutes(r, svc, indexerapi.AuthConfig{
		JWKSUrl: s.cfg.Query.JWKSUrl,
	}, logger)
	return r
}
```

### Call flow: EVM client → api-server → indexer

```
EVM Client (MetaMask)
 │ GET balance (EVM address, DEMO token)
 ▼
API Server
 │ 1. Auth: VerifyEIP191Signature(evmAddress) ← pkg/auth/evm.go
 │ 2. Resolve: userStore.GetUserByEVMAddress() ← userstore (api-server DB)
 │ → canton_party_id
 │ 3. Issue: jwtIssuer.IssuePartyJWT(canton_party_id)
 │ 4. Call: indexer /v1/balance/{canton_party_id}
 │ Authorization: Bearer <jwt>
 ▼
Indexer
 │ 5. Validate JWT → extract canton_party_id from claims
 │ 6. scopeCheck: party_id matches URL param
 │ 7. store.GetTokenBalance(canton_party_id, "DEMO")
 │ 8. Return Balance{party_id, balance}
 ▼
API Server
 │ 9. Map party_id → evm_address for client response (if needed)
 ▼
EVM Client ← {"balance": "1000000000000000000"}
```

---

## 9. 
Configuration

```go
// pkg/config/indexer.go

type IndexerConfig struct {
	Logging  LoggingConfig
	Database DatabaseConfig // shared with api-server
	Canton   CantonConfig   // same issuer credentials as api-server
	Indexer  IndexerOptions
	Query    IndexerQueryConfig
}

type IndexerOptions struct {
	WhitelistedPackageIDs []string      `yaml:"whitelisted_package_ids"`
	PackageID             string        `yaml:"package_id"`            // TokenTransferEvent package
	MaxReconnectBackoff   time.Duration `yaml:"max_reconnect_backoff"` // default 60s
}

type IndexerQueryConfig struct {
	Server  ServerConfig // host, port, timeouts
	JWKSUrl string       `yaml:"jwks_url"` // shared with api-server
}
```

```yaml
# config.indexer.yaml
logging:
  level: info
  format: json

database:
  host: localhost
  port: 5432
  name: canton_middleware # same DB, indexer writes its own tables
  user: postgres
  password: ${POSTGRES_PASSWORD}

canton:
  endpoint: localhost:5011
  issuer_party: "Issuer::1220..."
  auth:
    type: oauth2
    client_id: ${CANTON_CLIENT_ID}
    client_secret: ${CANTON_CLIENT_SECRET}
    token_url: ${CANTON_TOKEN_URL}

indexer:
  package_id: "168483ce8a80e76f69f7392ceaa9ff57b1036b8fb41ccb3d410b087048195a92"
  whitelisted_package_ids:
    - "168483ce8a80e76f69f7392ceaa9ff57b1036b8fb41ccb3d410b087048195a92" # DEMO
    # - "<additional-package-id>" # e.g. Canton Coin (Phase 2)
  max_reconnect_backoff: 60s

query:
  server:
    host: 0.0.0.0
    port: 8082
  jwks_url: ${JWKS_URL} # same JWKS as api-server for JWT validation
```

---

## 10. 
Integration with API Server + +``` +Shared PostgreSQL (canton_middleware DB): + + public.* ← api-server writes + users ← EVM ↔ party_id mapping lives here, NOT in indexer + user_token_balances ← DEPRECATED after migration + bridge_events ← DEPRECATED after migration + token_metrics ← DEPRECATED after migration + + indexer.* ← indexer writes, api-server may read + ledger_checkpoints + indexed_tokens + transfer_events ← replaces bridge_events (richer, includes transfers) + token_balances ← replaces user_token_balances (keyed by party_id) + token_stats ← replaces token_metrics + +Option A (recommended Phase 1): + api-server issues a JWT → calls indexer HTTP API. + Clean separation. No shared DB reads from api-server side. + +Option B (simpler Phase 1 alternative): + api-server reads indexer.token_balances directly via SQL + (same DB, no HTTP hop needed). Requires api-server to know the indexer schema. +``` + +**Migration path:** +``` +Step 1 Deploy indexer. It builds indexer.* tables from offset 0. + Reconciler continues running in parallel. + +Step 2 Validate: compare reconciler balances vs indexer.token_balances. + Confirm TokenTransferEvent DAML upgrade is live and emitting. + +Step 3 Switch api-server token provider to call indexer API (or read indexer tables). + +Step 4 Disable reconciler. Remove after one release cycle. +``` + +--- + +## 11. Open Questions & Future Work + +| Question | Decision | +|---|---| +| DB: same instance? | Yes — same DB, indexer.* schema | +| ORM? | Bun — consistent with project | +| HTTP router? | chi — consistent with project | +| Query port? | 8082 (api=8080, relayer=8081) | +| Canton auth? | Reuse issuer OAuth2 creds from existing config | +| JWT claim name for party_id? | `canton_party_id` (custom claim) | +| api-server call mode? | Option A (HTTP) initially; can collapse to Option B (shared DB read) | +| Docker Compose? | Add `indexer` + `indexer-migrate` services | + +### Phase 2 + +1. 
**GraphQL** — add `pkg/indexer/graph/` alongside `pkg/indexer/api/` +2. **WebSocket push** — real-time event stream from processor → subscribed clients +3. **Canton Coin** — same code, different package IDs + Super Validator node +4. **Metrics** — `indexer_lag_offsets`, `events_per_second`, `batch_commit_duration_ms` +5. **Backfill** — `cmd/indexer/backfill/` to replay from offset 0 after package upgrades + +--- + +*Created: 2026-03-02* +*CIP Reference: https://github.com/canton-foundation/cips/blob/main/cip-0086/cip-0086.md* diff --git a/pkg/cantonsdk/streaming/builder.go b/pkg/cantonsdk/streaming/builder.go new file mode 100644 index 00000000..d1421371 --- /dev/null +++ b/pkg/cantonsdk/streaming/builder.go @@ -0,0 +1,68 @@ +package streaming + +import ( + "time" + + lapiv2 "github.com/chainsafe/canton-middleware/pkg/cantonsdk/lapi/v2" +) + +// FieldValue is an opaque DAML value used to construct LedgerEvents. +// Use the Make* functions below to create values of each DAML type. +// This keeps callers free of any direct lapiv2 dependency. +type FieldValue struct{ v *lapiv2.Value } + +// MakeTextField wraps a Go string as a DAML Text value. +func MakeTextField(s string) FieldValue { + return FieldValue{&lapiv2.Value{Sum: &lapiv2.Value_Text{Text: s}}} +} + +// MakePartyField wraps a party ID string as a DAML Party value. +func MakePartyField(s string) FieldValue { + return FieldValue{&lapiv2.Value{Sum: &lapiv2.Value_Party{Party: s}}} +} + +// MakeNumericField wraps a decimal string as a DAML Numeric value. +func MakeNumericField(s string) FieldValue { + return FieldValue{&lapiv2.Value{Sum: &lapiv2.Value_Numeric{Numeric: s}}} +} + +// MakeTimestampField wraps a time.Time as a DAML Timestamp value. +func MakeTimestampField(t time.Time) FieldValue { + return FieldValue{&lapiv2.Value{Sum: &lapiv2.Value_Timestamp{Timestamp: t.UnixMicro()}}} +} + +// MakeNoneField returns a DAML Optional None value. 
+func MakeNoneField() FieldValue { + return FieldValue{&lapiv2.Value{Sum: &lapiv2.Value_Optional{Optional: &lapiv2.Optional{}}}} +} + +// MakeSomePartyField returns a DAML Optional(Party) Some value. +func MakeSomePartyField(party string) FieldValue { + return FieldValue{&lapiv2.Value{Sum: &lapiv2.Value_Optional{Optional: &lapiv2.Optional{ + Value: &lapiv2.Value{Sum: &lapiv2.Value_Party{Party: party}}, + }}}} +} + +// MakeRecordField builds a DAML Record value from a map of sub-fields. +func MakeRecordField(fields map[string]FieldValue) FieldValue { + rf := make([]*lapiv2.RecordField, 0, len(fields)) + for k, v := range fields { + rf = append(rf, &lapiv2.RecordField{Label: k, Value: v.v}) + } + return FieldValue{&lapiv2.Value{Sum: &lapiv2.Value_Record{Record: &lapiv2.Record{Fields: rf}}}} +} + +// MakeSomeRecordField wraps a record in a DAML Optional(Record) Some value. +func MakeSomeRecordField(fields map[string]FieldValue) FieldValue { + inner := MakeRecordField(fields) + return FieldValue{&lapiv2.Value{Sum: &lapiv2.Value_Optional{Optional: &lapiv2.Optional{Value: inner.v}}}} +} + +// MakeTextMapField builds a DAML TextMap value from a Go string map. +func MakeTextMapField(entries map[string]string) FieldValue { + es := make([]*lapiv2.TextMap_Entry, 0, len(entries)) + for k, v := range entries { + es = append(es, &lapiv2.TextMap_Entry{Key: k, Value: &lapiv2.Value{Sum: &lapiv2.Value_Text{Text: v}}}) + } + return FieldValue{&lapiv2.Value{Sum: &lapiv2.Value_TextMap{TextMap: &lapiv2.TextMap{Entries: es}}}} +} diff --git a/pkg/cantonsdk/streaming/client.go b/pkg/cantonsdk/streaming/client.go index 79154a37..7f2f0a21 100644 --- a/pkg/cantonsdk/streaming/client.go +++ b/pkg/cantonsdk/streaming/client.go @@ -55,7 +55,7 @@ func New(l ledger.Ledger, party string, opts ...Option) *Client { // resume from the last safely received point. The caller is responsible for persisting // lastOffset to the database (the processor does this atomically with event writes). 
// -// The returned channel is closed when ctx is cancelled or a terminal error occurs +// The returned channel is closed when ctx is canceled or a terminal error occurs // (io.EOF, context cancellation). func (c *Client) Subscribe( ctx context.Context, @@ -110,7 +110,7 @@ func (c *Client) Subscribe( } // runStream opens a single GetUpdates stream and forwards transactions to out until -// the stream ends or ctx is cancelled. It updates lastOffset atomically on each +// the stream ends or ctx is canceled. It updates lastOffset atomically on each // received transaction. func (c *Client) runStream( ctx context.Context, diff --git a/pkg/cantonsdk/streaming/stream.go b/pkg/cantonsdk/streaming/stream.go new file mode 100644 index 00000000..dd150b8c --- /dev/null +++ b/pkg/cantonsdk/streaming/stream.go @@ -0,0 +1,62 @@ +package streaming + +import "context" + +// Batch carries decoded items from one LedgerTransaction, preserving the +// transaction boundary for atomic offset writes. +type Batch[T any] struct { + Offset int64 + UpdateID string + Items []T +} + +// Stream[T] wraps a Streamer and applies a per-event decode function. +// Use when subscribing to a single homogeneous template. +type Stream[T any] struct { + streamer Streamer + decode func(*LedgerTransaction, *LedgerEvent) (T, bool) +} + +// NewStream creates a Stream[T] that decodes events using the provided function. +func NewStream[T any](streamer Streamer, decode func(*LedgerTransaction, *LedgerEvent) (T, bool)) *Stream[T] { + return &Stream[T]{streamer: streamer, decode: decode} +} + +// Subscribe passes lastOffset to streamer.Subscribe, iterates each tx's events +// through decode, and emits *Batch[T] for every tx. Items may be empty — +// offset must still advance for no-op transactions. 
+func (s *Stream[T]) Subscribe(ctx context.Context, req SubscribeRequest, lastOffset *int64) <-chan *Batch[T] { + txCh := s.streamer.Subscribe(ctx, req, lastOffset) + out := make(chan *Batch[T], txChannelCap) + + go func() { + defer close(out) + for { + select { + case tx, ok := <-txCh: + if !ok { + return + } + batch := &Batch[T]{ + Offset: tx.Offset, + UpdateID: tx.UpdateID, + Items: make([]T, 0, len(tx.Events)), + } + for _, ev := range tx.Events { + if item, ok := s.decode(tx, ev); ok { + batch.Items = append(batch.Items, item) + } + } + select { + case out <- batch: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + + return out +} diff --git a/pkg/cantonsdk/streaming/types.go b/pkg/cantonsdk/streaming/types.go index 60ee7bb9..7b869441 100644 --- a/pkg/cantonsdk/streaming/types.go +++ b/pkg/cantonsdk/streaming/types.go @@ -52,6 +52,25 @@ type LedgerEvent struct { fields map[string]*lapiv2.Value } +// NewLedgerEvent constructs a LedgerEvent with pre-decoded fields. +// Used by tests that need to build events without going through the proto decode path. +// Accepts FieldValue values produced by the Make* constructor functions so that +// callers have no direct dependency on lapiv2. +func NewLedgerEvent(contractID, packageID, moduleName, templateName string, isCreated bool, fields map[string]FieldValue) *LedgerEvent { + inner := make(map[string]*lapiv2.Value, len(fields)) + for k, v := range fields { + inner[k] = v.v + } + return &LedgerEvent{ + ContractID: contractID, + PackageID: packageID, + ModuleName: moduleName, + TemplateName: templateName, + IsCreated: isCreated, + fields: inner, + } +} + // TextField returns the named DAML Text field as a Go string. // Returns "" when the field is absent or not of type Text. 
func (e *LedgerEvent) TextField(name string) string { diff --git a/pkg/cantonsdk/values/decode.go b/pkg/cantonsdk/values/decode.go index 48d7c80a..bf062fc5 100644 --- a/pkg/cantonsdk/values/decode.go +++ b/pkg/cantonsdk/values/decode.go @@ -159,7 +159,7 @@ func MapLookupText(v *lapiv2.Value, key string) string { if v == nil { return "" } - // DA.TextMap.TextMap serialises as Value_TextMap + // DA.TextMap.TextMap serializes as Value_TextMap if tm, ok := v.Sum.(*lapiv2.Value_TextMap); ok && tm.TextMap != nil { for _, e := range tm.TextMap.Entries { if e.GetKey() == key { @@ -168,7 +168,7 @@ func MapLookupText(v *lapiv2.Value, key string) string { } return "" } - // DA.Map.Map serialises as Value_GenMap with Text keys + // DA.Map.Map serializes as Value_GenMap with Text keys if gm, ok := v.Sum.(*lapiv2.Value_GenMap); ok && gm.GenMap != nil { for _, e := range gm.GenMap.Entries { if Text(e.GetKey()) == key { diff --git a/pkg/indexer/engine/decoder.go b/pkg/indexer/engine/decoder.go new file mode 100644 index 00000000..53e97372 --- /dev/null +++ b/pkg/indexer/engine/decoder.go @@ -0,0 +1,127 @@ +package engine + +import ( + "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" + "github.com/chainsafe/canton-middleware/pkg/indexer" + + "go.uber.org/zap" +) + +const ( + tokenTransferEventModule = "CIP56.Events" + tokenTransferEventEntity = "TokenTransferEvent" + + // Metadata keys for bridge context stored in TokenTransferEvent.meta.values. + metaKeyExternalTxID = "bridge.externalTxId" + metaKeyExternalAddress = "bridge.externalAddress" + metaKeyFingerprint = "bridge.fingerprint" +) + +// NewTokenTransferDecoder returns a decode function for use with streaming.NewStream. 
+// +// The closure: +// - skips archived events +// - checks ModuleName == "CIP56.Events" && TemplateName == "TokenTransferEvent" +// - applies the FilterModeWhitelist instrument check when mode is FilterModeWhitelist +// - extracts all fields into a *ParsedEvent +// - returns nil, false for invalid events (both parties absent, filter miss) +func NewTokenTransferDecoder( + mode indexer.FilterMode, + allowed []indexer.InstrumentKey, + logger *zap.Logger, +) func(*streaming.LedgerTransaction, *streaming.LedgerEvent) (*indexer.ParsedEvent, bool) { + allowedMap := make(map[indexer.InstrumentKey]struct{}, len(allowed)) + for _, k := range allowed { + allowedMap[k] = struct{}{} + } + + return func(tx *streaming.LedgerTransaction, ev *streaming.LedgerEvent) (*indexer.ParsedEvent, bool) { + if !ev.IsCreated { + return nil, false + } + if ev.ModuleName != tokenTransferEventModule || ev.TemplateName != tokenTransferEventEntity { + return nil, false + } + + instrumentID := ev.NestedTextField("instrumentId", "id") + instrumentAdmin := ev.NestedPartyField("instrumentId", "admin") + key := indexer.InstrumentKey{Admin: instrumentAdmin, ID: instrumentID} + + if mode == indexer.FilterModeWhitelist { + if _, ok := allowedMap[key]; !ok { + logger.Debug("skipping event for unlisted instrument", + zap.String("instrument_id", instrumentID), + zap.String("instrument_admin", instrumentAdmin), + zap.String("contract_id", ev.ContractID), + ) + return nil, false + } + } + + var fromPartyID *string + if !ev.IsNone("fromParty") { + v := ev.OptionalPartyField("fromParty") + if v != "" { + fromPartyID = &v + } + } + + var toPartyID *string + if !ev.IsNone("toParty") { + v := ev.OptionalPartyField("toParty") + if v != "" { + toPartyID = &v + } + } + + var externalTxID *string + if v := ev.OptionalMetaLookup("meta", metaKeyExternalTxID); v != "" { + externalTxID = &v + } + + var externalAddress *string + if v := ev.OptionalMetaLookup("meta", metaKeyExternalAddress); v != "" { + externalAddress = 
&v + } + + var fingerprint *string + if v := ev.OptionalMetaLookup("meta", metaKeyFingerprint); v != "" { + fingerprint = &v + } + + var et indexer.EventType + switch { + case fromPartyID == nil && toPartyID != nil: + et = indexer.EventMint + case fromPartyID != nil && toPartyID == nil: + et = indexer.EventBurn + case fromPartyID != nil && toPartyID != nil: + et = indexer.EventTransfer + default: + logger.Warn("dropping TokenTransferEvent with both parties absent", + zap.String("contract_id", ev.ContractID), + zap.String("tx_id", tx.UpdateID), + zap.String("instrument_id", instrumentID), + ) + return nil, false + } + + return &indexer.ParsedEvent{ + InstrumentID: instrumentID, + InstrumentAdmin: ev.NestedPartyField("instrumentId", "admin"), + Issuer: ev.PartyField("issuer"), + EventType: et, + Amount: ev.NumericField("amount"), + FromPartyID: fromPartyID, + ToPartyID: toPartyID, + ExternalTxID: externalTxID, + ExternalAddress: externalAddress, + Fingerprint: fingerprint, + ContractID: ev.ContractID, + TxID: tx.UpdateID, + LedgerOffset: tx.Offset, + Timestamp: ev.TimestampField("timestamp"), + EffectiveTime: tx.EffectiveTime, + }, true + } +} diff --git a/pkg/indexer/engine/decoder_test.go b/pkg/indexer/engine/decoder_test.go new file mode 100644 index 00000000..65e5cce2 --- /dev/null +++ b/pkg/indexer/engine/decoder_test.go @@ -0,0 +1,248 @@ +package engine + +import ( + "testing" + "time" + + "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" + "github.com/chainsafe/canton-middleware/pkg/indexer" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +// --------------------------------------------------------------------------- +// Shared test constants (accessible from processor_test.go, same package) +// --------------------------------------------------------------------------- + +const ( + testContractID = "contract-id-1" + testInstrumentID = "DEMO" + testInstrumentAdmin = "issuer-party::abc123" 
+ testIssuer = "issuer-party::abc123" + testAmount = "100.000000000000000000" + testRecipient = "recipient-party::def456" + testSender = "sender-party::ghi789" +) + +// --------------------------------------------------------------------------- +// Test event / transaction builders +// --------------------------------------------------------------------------- + +func makeTransferEvent(contractID string, fromParty, toParty streaming.FieldValue, extra map[string]streaming.FieldValue) *streaming.LedgerEvent { + fields := map[string]streaming.FieldValue{ + "instrumentId": streaming.MakeRecordField(map[string]streaming.FieldValue{ + "id": streaming.MakeTextField(testInstrumentID), + "admin": streaming.MakePartyField(testInstrumentAdmin), + }), + "issuer": streaming.MakePartyField(testIssuer), + "fromParty": fromParty, + "toParty": toParty, + "amount": streaming.MakeNumericField(testAmount), + "timestamp": streaming.MakeTimestampField(time.Unix(1_700_000_000, 0)), + "meta": streaming.MakeNoneField(), + } + for k, v := range extra { + fields[k] = v + } + return streaming.NewLedgerEvent(contractID, "pkg-id", tokenTransferEventModule, tokenTransferEventEntity, true, fields) +} + +func makeTx(offset int64, events ...*streaming.LedgerEvent) *streaming.LedgerTransaction { + return &streaming.LedgerTransaction{ + UpdateID: "update-" + string(rune('0'+offset)), + Offset: offset, + EffectiveTime: time.Unix(1_700_000_000, 0), + Events: events, + } +} + +// decodeAll applies decode to every event in tx and collects successful results. 
+func decodeAll(decode func(*streaming.LedgerTransaction, *streaming.LedgerEvent) (*indexer.ParsedEvent, bool), tx *streaming.LedgerTransaction) []*indexer.ParsedEvent { + var out []*indexer.ParsedEvent + for _, ev := range tx.Events { + if pe, ok := decode(tx, ev); ok { + out = append(out, pe) + } + } + return out +} + +// --------------------------------------------------------------------------- +// Decoder tests +// --------------------------------------------------------------------------- + +func TestDecoder_FilterModeAll_Mint(t *testing.T) { + decode := NewTokenTransferDecoder(indexer.FilterModeAll, nil, zap.NewNop()) + + ev := makeTransferEvent(testContractID, streaming.MakeNoneField(), streaming.MakeSomePartyField(testRecipient), nil) + got := decodeAll(decode, makeTx(1, ev)) + + require.Len(t, got, 1) + pe := got[0] + assert.Equal(t, indexer.EventMint, pe.EventType) + assert.Nil(t, pe.FromPartyID) + assert.Equal(t, testRecipient, *pe.ToPartyID) + assert.Equal(t, testInstrumentID, pe.InstrumentID) + assert.Equal(t, testInstrumentAdmin, pe.InstrumentAdmin) + assert.Equal(t, testIssuer, pe.Issuer) + assert.Equal(t, testAmount, pe.Amount) + assert.Equal(t, testContractID, pe.ContractID) + assert.Equal(t, int64(1), pe.LedgerOffset) + assert.Equal(t, "update-1", pe.TxID) + assert.Equal(t, time.Unix(1_700_000_000, 0), pe.EffectiveTime) +} + +func TestDecoder_FilterModeAll_Burn(t *testing.T) { + decode := NewTokenTransferDecoder(indexer.FilterModeAll, nil, zap.NewNop()) + + ev := makeTransferEvent(testContractID, streaming.MakeSomePartyField(testSender), streaming.MakeNoneField(), nil) + got := decodeAll(decode, makeTx(2, ev)) + + require.Len(t, got, 1) + pe := got[0] + assert.Equal(t, indexer.EventBurn, pe.EventType) + assert.Equal(t, testSender, *pe.FromPartyID) + assert.Nil(t, pe.ToPartyID) +} + +func TestDecoder_FilterModeAll_Transfer(t *testing.T) { + decode := NewTokenTransferDecoder(indexer.FilterModeAll, nil, zap.NewNop()) + + ev := 
makeTransferEvent(testContractID, streaming.MakeSomePartyField(testSender), streaming.MakeSomePartyField(testRecipient), nil) + got := decodeAll(decode, makeTx(3, ev)) + + require.Len(t, got, 1) + pe := got[0] + assert.Equal(t, indexer.EventTransfer, pe.EventType) + assert.Equal(t, testSender, *pe.FromPartyID) + assert.Equal(t, testRecipient, *pe.ToPartyID) +} + +func TestDecoder_BothPartiesAbsent_Dropped(t *testing.T) { + decode := NewTokenTransferDecoder(indexer.FilterModeAll, nil, zap.NewNop()) + + ev := makeTransferEvent(testContractID, streaming.MakeNoneField(), streaming.MakeNoneField(), nil) + got := decodeAll(decode, makeTx(4, ev)) + + assert.Empty(t, got) +} + +func TestDecoder_SkipsArchivedEvent(t *testing.T) { + decode := NewTokenTransferDecoder(indexer.FilterModeAll, nil, zap.NewNop()) + + ev := streaming.NewLedgerEvent(testContractID, "pkg-id", tokenTransferEventModule, tokenTransferEventEntity, false, nil) + got := decodeAll(decode, makeTx(5, ev)) + + assert.Empty(t, got) +} + +func TestDecoder_SkipsWrongTemplate(t *testing.T) { + decode := NewTokenTransferDecoder(indexer.FilterModeAll, nil, zap.NewNop()) + + ev := streaming.NewLedgerEvent(testContractID, "pkg-id", "OtherModule", "OtherEntity", true, map[string]streaming.FieldValue{}) + got := decodeAll(decode, makeTx(6, ev)) + + assert.Empty(t, got) +} + +func TestDecoder_FilterModeWhitelist_Allowed(t *testing.T) { + allowed := []indexer.InstrumentKey{{Admin: testInstrumentAdmin, ID: testInstrumentID}} + decode := NewTokenTransferDecoder(indexer.FilterModeWhitelist, allowed, zap.NewNop()) + + ev := makeTransferEvent(testContractID, streaming.MakeNoneField(), streaming.MakeSomePartyField(testRecipient), nil) + got := decodeAll(decode, makeTx(7, ev)) + + require.Len(t, got, 1) + assert.Equal(t, testInstrumentID, got[0].InstrumentID) +} + +func TestDecoder_FilterModeWhitelist_Blocked_WrongAdmin(t *testing.T) { + allowed := []indexer.InstrumentKey{{Admin: "other-issuer::xyz", ID: testInstrumentID}} + 
decode := NewTokenTransferDecoder(indexer.FilterModeWhitelist, allowed, zap.NewNop()) + + ev := makeTransferEvent(testContractID, streaming.MakeNoneField(), streaming.MakeSomePartyField(testRecipient), nil) + got := decodeAll(decode, makeTx(8, ev)) + + assert.Empty(t, got) +} + +func TestDecoder_FilterModeWhitelist_Blocked_WrongID(t *testing.T) { + allowed := []indexer.InstrumentKey{{Admin: testInstrumentAdmin, ID: "OTHER"}} + decode := NewTokenTransferDecoder(indexer.FilterModeWhitelist, allowed, zap.NewNop()) + + ev := makeTransferEvent(testContractID, streaming.MakeNoneField(), streaming.MakeSomePartyField(testRecipient), nil) + got := decodeAll(decode, makeTx(9, ev)) + + assert.Empty(t, got) +} + +func TestDecoder_FilterModeWhitelist_MultipleKeys_MatchingPasses(t *testing.T) { + allowed := []indexer.InstrumentKey{ + {Admin: "other-issuer::xyz", ID: "OTHER"}, + {Admin: testInstrumentAdmin, ID: testInstrumentID}, + } + decode := NewTokenTransferDecoder(indexer.FilterModeWhitelist, allowed, zap.NewNop()) + + ev := makeTransferEvent(testContractID, streaming.MakeNoneField(), streaming.MakeSomePartyField(testRecipient), nil) + got := decodeAll(decode, makeTx(10, ev)) + + require.Len(t, got, 1) +} + +func TestDecoder_BridgeMetaExtracted(t *testing.T) { + decode := NewTokenTransferDecoder(indexer.FilterModeAll, nil, zap.NewNop()) + + meta := streaming.MakeSomeRecordField(map[string]streaming.FieldValue{ + "values": streaming.MakeTextMapField(map[string]string{ + metaKeyExternalTxID: "0xdeadbeef", + metaKeyExternalAddress: "0xabc", + metaKeyFingerprint: "fp-1", + }), + }) + ev := makeTransferEvent(testContractID, + streaming.MakeSomePartyField(testSender), streaming.MakeSomePartyField(testRecipient), + map[string]streaming.FieldValue{"meta": meta}, + ) + got := decodeAll(decode, makeTx(11, ev)) + + require.Len(t, got, 1) + pe := got[0] + require.NotNil(t, pe.ExternalTxID) + assert.Equal(t, "0xdeadbeef", *pe.ExternalTxID) + require.NotNil(t, pe.ExternalAddress) + 
assert.Equal(t, "0xabc", *pe.ExternalAddress) + require.NotNil(t, pe.Fingerprint) + assert.Equal(t, "fp-1", *pe.Fingerprint) +} + +func TestDecoder_BridgeMeta_NoneField_NilPointers(t *testing.T) { + decode := NewTokenTransferDecoder(indexer.FilterModeAll, nil, zap.NewNop()) + + // meta = None → all bridge fields should be nil + ev := makeTransferEvent(testContractID, + streaming.MakeSomePartyField(testSender), streaming.MakeSomePartyField(testRecipient), nil, + ) + got := decodeAll(decode, makeTx(12, ev)) + + require.Len(t, got, 1) + pe := got[0] + assert.Nil(t, pe.ExternalTxID) + assert.Nil(t, pe.ExternalAddress) + assert.Nil(t, pe.Fingerprint) +} + +func TestDecoder_MultipleEventsInTx_OnlyMatchingReturned(t *testing.T) { + decode := NewTokenTransferDecoder(indexer.FilterModeAll, nil, zap.NewNop()) + + ev1 := makeTransferEvent("c-1", streaming.MakeNoneField(), streaming.MakeSomePartyField(testRecipient), nil) + ev2 := makeTransferEvent("c-2", streaming.MakeSomePartyField(testSender), streaming.MakeNoneField(), nil) + ev3 := streaming.NewLedgerEvent("c-3", "pkg", "Other", "Template", true, map[string]streaming.FieldValue{}) + + got := decodeAll(decode, makeTx(13, ev1, ev2, ev3)) + + require.Len(t, got, 2) + assert.Equal(t, "c-1", got[0].ContractID) + assert.Equal(t, "c-2", got[1].ContractID) +} diff --git a/pkg/indexer/fetcher.go b/pkg/indexer/engine/fetcher.go similarity index 56% rename from pkg/indexer/fetcher.go rename to pkg/indexer/engine/fetcher.go index bf0fb43f..498424e7 100644 --- a/pkg/indexer/fetcher.go +++ b/pkg/indexer/engine/fetcher.go @@ -1,10 +1,11 @@ -package indexer +package engine import ( "context" "sync/atomic" "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" + "github.com/chainsafe/canton-middleware/pkg/indexer" "go.uber.org/zap" ) @@ -12,36 +13,43 @@ import ( const txChannelCap = 100 // Fetcher opens a live Canton stream from a caller-supplied resume offset and -// exposes the resulting transactions via Events. 
+// exposes the resulting batches via Events. // // Typical usage: // -// f := indexer.NewFetcher(streamClient, templateID, logger) +// decode := indexer.NewTokenTransferDecoder(mode, allowed, logger) +// f := indexer.NewFetcher(streamClient, templateID, decode, logger) // f.Start(ctx, lastProcessedOffset) -// for tx := range f.Events() { ... } +// for batch := range f.Events() { ... } type Fetcher struct { - stream streaming.Streamer + stream *streaming.Stream[*indexer.ParsedEvent] templateID streaming.TemplateID - out chan *streaming.LedgerTransaction + out chan *streaming.Batch[*indexer.ParsedEvent] logger *zap.Logger } // NewFetcher creates a new Fetcher. // -// - stream: Canton streaming client (handles reconnection, auth, backoff) +// - streamer: Canton streaming client (handles reconnection, auth, backoff) // - templateID: DAML template to subscribe to (e.g. TokenTransferEvent) +// - decode: per-event decode function (see NewTokenTransferDecoder) // - logger: caller-provided logger -func NewFetcher(stream streaming.Streamer, templateID streaming.TemplateID, logger *zap.Logger) *Fetcher { +func NewFetcher( + streamer streaming.Streamer, + templateID streaming.TemplateID, + decode func(*streaming.LedgerTransaction, *streaming.LedgerEvent) (*indexer.ParsedEvent, bool), + logger *zap.Logger, +) *Fetcher { return &Fetcher{ - stream: stream, + stream: streaming.NewStream(streamer, decode), templateID: templateID, - out: make(chan *streaming.LedgerTransaction, txChannelCap), + out: make(chan *streaming.Batch[*indexer.ParsedEvent], txChannelCap), logger: logger, } } // Start begins streaming from offset in a background goroutine. It is non-blocking. -// The goroutine exits when ctx is cancelled or the underlying stream closes. +// The goroutine exits when ctx is canceled or the underlying stream closes. // // Start must be called exactly once before Events is used. 
func (f *Fetcher) Start(ctx context.Context, offset int64) { @@ -53,7 +61,7 @@ func (f *Fetcher) Start(ctx context.Context, offset int64) { var lastOffset int64 atomic.StoreInt64(&lastOffset, offset) - txCh := f.stream.Subscribe(ctx, streaming.SubscribeRequest{ + batchCh := f.stream.Subscribe(ctx, streaming.SubscribeRequest{ FromOffset: offset, TemplateIDs: []streaming.TemplateID{f.templateID}, }, &lastOffset) @@ -62,12 +70,12 @@ func (f *Fetcher) Start(ctx context.Context, offset int64) { defer close(f.out) for { select { - case tx, ok := <-txCh: + case batch, ok := <-batchCh: if !ok { return } select { - case f.out <- tx: + case f.out <- batch: case <-ctx.Done(): return } @@ -78,8 +86,8 @@ func (f *Fetcher) Start(ctx context.Context, offset int64) { }() } -// Events returns the read-only channel of LedgerTransactions. +// Events returns the read-only channel of decoded batches. // Must be called after Start. The channel is closed when the stream terminates. -func (f *Fetcher) Events() <-chan *streaming.LedgerTransaction { +func (f *Fetcher) Events() <-chan *streaming.Batch[*indexer.ParsedEvent] { return f.out } diff --git a/pkg/indexer/engine/mocks/mock_event_fetcher.go b/pkg/indexer/engine/mocks/mock_event_fetcher.go new file mode 100644 index 00000000..76757f06 --- /dev/null +++ b/pkg/indexer/engine/mocks/mock_event_fetcher.go @@ -0,0 +1,121 @@ +// Code generated by mockery v2.53.6. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + indexer "github.com/chainsafe/canton-middleware/pkg/indexer" + + mock "github.com/stretchr/testify/mock" + + streaming "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" +) + +// EventFetcher is an autogenerated mock type for the EventFetcher type +type EventFetcher struct { + mock.Mock +} + +type EventFetcher_Expecter struct { + mock *mock.Mock +} + +func (_m *EventFetcher) EXPECT() *EventFetcher_Expecter { + return &EventFetcher_Expecter{mock: &_m.Mock} +} + +// Events provides a mock function with no fields +func (_m *EventFetcher) Events() <-chan *streaming.Batch[*indexer.ParsedEvent] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Events") + } + + var r0 <-chan *streaming.Batch[*indexer.ParsedEvent] + if rf, ok := ret.Get(0).(func() <-chan *streaming.Batch[*indexer.ParsedEvent]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *streaming.Batch[*indexer.ParsedEvent]) + } + } + + return r0 +} + +// EventFetcher_Events_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Events' +type EventFetcher_Events_Call struct { + *mock.Call +} + +// Events is a helper method to define mock.On call +func (_e *EventFetcher_Expecter) Events() *EventFetcher_Events_Call { + return &EventFetcher_Events_Call{Call: _e.mock.On("Events")} +} + +func (_c *EventFetcher_Events_Call) Run(run func()) *EventFetcher_Events_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EventFetcher_Events_Call) Return(_a0 <-chan *streaming.Batch[*indexer.ParsedEvent]) *EventFetcher_Events_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EventFetcher_Events_Call) RunAndReturn(run func() <-chan *streaming.Batch[*indexer.ParsedEvent]) *EventFetcher_Events_Call { + _c.Call.Return(run) + return _c +} + +// Start provides a mock function with given fields: ctx, offset +func (_m 
*EventFetcher) Start(ctx context.Context, offset int64) { + _m.Called(ctx, offset) +} + +// EventFetcher_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type EventFetcher_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - ctx context.Context +// - offset int64 +func (_e *EventFetcher_Expecter) Start(ctx interface{}, offset interface{}) *EventFetcher_Start_Call { + return &EventFetcher_Start_Call{Call: _e.mock.On("Start", ctx, offset)} +} + +func (_c *EventFetcher_Start_Call) Run(run func(ctx context.Context, offset int64)) *EventFetcher_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int64)) + }) + return _c +} + +func (_c *EventFetcher_Start_Call) Return() *EventFetcher_Start_Call { + _c.Call.Return() + return _c +} + +func (_c *EventFetcher_Start_Call) RunAndReturn(run func(context.Context, int64)) *EventFetcher_Start_Call { + _c.Run(run) + return _c +} + +// NewEventFetcher creates a new instance of EventFetcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventFetcher(t interface { + mock.TestingT + Cleanup(func()) +}) *EventFetcher { + mock := &EventFetcher{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/indexer/engine/mocks/mock_store.go b/pkg/indexer/engine/mocks/mock_store.go new file mode 100644 index 00000000..e5b1bf99 --- /dev/null +++ b/pkg/indexer/engine/mocks/mock_store.go @@ -0,0 +1,335 @@ +// Code generated by mockery v2.53.6. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + indexer "github.com/chainsafe/canton-middleware/pkg/indexer" + + mock "github.com/stretchr/testify/mock" +) + +// Store is an autogenerated mock type for the Store type +type Store struct { + mock.Mock +} + +type Store_Expecter struct { + mock *mock.Mock +} + +func (_m *Store) EXPECT() *Store_Expecter { + return &Store_Expecter{mock: &_m.Mock} +} + +// ApplyBalanceDelta provides a mock function with given fields: ctx, partyID, instrumentAdmin, instrumentID, delta +func (_m *Store) ApplyBalanceDelta(ctx context.Context, partyID string, instrumentAdmin string, instrumentID string, delta string) error { + ret := _m.Called(ctx, partyID, instrumentAdmin, instrumentID, delta) + + if len(ret) == 0 { + panic("no return value specified for ApplyBalanceDelta") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) error); ok { + r0 = rf(ctx, partyID, instrumentAdmin, instrumentID, delta) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Store_ApplyBalanceDelta_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ApplyBalanceDelta' +type Store_ApplyBalanceDelta_Call struct { + *mock.Call +} + +// ApplyBalanceDelta is a helper method to define mock.On call +// - ctx context.Context +// - partyID string +// - instrumentAdmin string +// - instrumentID string +// - delta string +func (_e *Store_Expecter) ApplyBalanceDelta(ctx interface{}, partyID interface{}, instrumentAdmin interface{}, instrumentID interface{}, delta interface{}) *Store_ApplyBalanceDelta_Call { + return &Store_ApplyBalanceDelta_Call{Call: _e.mock.On("ApplyBalanceDelta", ctx, partyID, instrumentAdmin, instrumentID, delta)} +} + +func (_c *Store_ApplyBalanceDelta_Call) Run(run func(ctx context.Context, partyID string, instrumentAdmin string, instrumentID string, delta string)) *Store_ApplyBalanceDelta_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string)) + }) + return _c +} + +func (_c *Store_ApplyBalanceDelta_Call) Return(_a0 error) *Store_ApplyBalanceDelta_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Store_ApplyBalanceDelta_Call) RunAndReturn(run func(context.Context, string, string, string, string) error) *Store_ApplyBalanceDelta_Call { + _c.Call.Return(run) + return _c +} + +// ApplySupplyDelta provides a mock function with given fields: ctx, instrumentAdmin, instrumentID, delta +func (_m *Store) ApplySupplyDelta(ctx context.Context, instrumentAdmin string, instrumentID string, delta string) error { + ret := _m.Called(ctx, instrumentAdmin, instrumentID, delta) + + if len(ret) == 0 { + panic("no return value specified for ApplySupplyDelta") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) error); ok { + r0 = rf(ctx, instrumentAdmin, instrumentID, delta) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Store_ApplySupplyDelta_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ApplySupplyDelta' +type Store_ApplySupplyDelta_Call struct { + *mock.Call +} + +// ApplySupplyDelta is a helper method to define mock.On call +// - ctx context.Context +// - instrumentAdmin string +// - instrumentID string +// - delta string +func (_e *Store_Expecter) ApplySupplyDelta(ctx interface{}, instrumentAdmin interface{}, instrumentID interface{}, delta interface{}) *Store_ApplySupplyDelta_Call { + return &Store_ApplySupplyDelta_Call{Call: _e.mock.On("ApplySupplyDelta", ctx, instrumentAdmin, instrumentID, delta)} +} + +func (_c *Store_ApplySupplyDelta_Call) Run(run func(ctx context.Context, instrumentAdmin string, instrumentID string, delta string)) *Store_ApplySupplyDelta_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string)) + }) + return _c +} + 
+func (_c *Store_ApplySupplyDelta_Call) Return(_a0 error) *Store_ApplySupplyDelta_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Store_ApplySupplyDelta_Call) RunAndReturn(run func(context.Context, string, string, string) error) *Store_ApplySupplyDelta_Call { + _c.Call.Return(run) + return _c +} + +// LatestOffset provides a mock function with given fields: ctx +func (_m *Store) LatestOffset(ctx context.Context) (int64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for LatestOffset") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (int64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) int64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Store_LatestOffset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestOffset' +type Store_LatestOffset_Call struct { + *mock.Call +} + +// LatestOffset is a helper method to define mock.On call +// - ctx context.Context +func (_e *Store_Expecter) LatestOffset(ctx interface{}) *Store_LatestOffset_Call { + return &Store_LatestOffset_Call{Call: _e.mock.On("LatestOffset", ctx)} +} + +func (_c *Store_LatestOffset_Call) Run(run func(ctx context.Context)) *Store_LatestOffset_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Store_LatestOffset_Call) Return(_a0 int64, _a1 error) *Store_LatestOffset_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Store_LatestOffset_Call) RunAndReturn(run func(context.Context) (int64, error)) *Store_LatestOffset_Call { + _c.Call.Return(run) + return _c +} + +// RunInTx provides a mock function with given fields: ctx, fn +func (_m *Store) RunInTx(ctx context.Context, fn func(context.Context, indexer.Store) 
error) error { + ret := _m.Called(ctx, fn) + + if len(ret) == 0 { + panic("no return value specified for RunInTx") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, func(context.Context, indexer.Store) error) error); ok { + r0 = rf(ctx, fn) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Store_RunInTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RunInTx' +type Store_RunInTx_Call struct { + *mock.Call +} + +// RunInTx is a helper method to define mock.On call +// - ctx context.Context +// - fn func(context.Context , indexer.Store) error +func (_e *Store_Expecter) RunInTx(ctx interface{}, fn interface{}) *Store_RunInTx_Call { + return &Store_RunInTx_Call{Call: _e.mock.On("RunInTx", ctx, fn)} +} + +func (_c *Store_RunInTx_Call) Run(run func(ctx context.Context, fn func(context.Context, indexer.Store) error)) *Store_RunInTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(func(context.Context, indexer.Store) error)) + }) + return _c +} + +func (_c *Store_RunInTx_Call) Return(_a0 error) *Store_RunInTx_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Store_RunInTx_Call) RunAndReturn(run func(context.Context, func(context.Context, indexer.Store) error) error) *Store_RunInTx_Call { + _c.Call.Return(run) + return _c +} + +// SaveBatch provides a mock function with given fields: ctx, offset, events +func (_m *Store) SaveBatch(ctx context.Context, offset int64, events []*indexer.ParsedEvent) error { + ret := _m.Called(ctx, offset, events) + + if len(ret) == 0 { + panic("no return value specified for SaveBatch") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64, []*indexer.ParsedEvent) error); ok { + r0 = rf(ctx, offset, events) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Store_SaveBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBatch' +type 
Store_SaveBatch_Call struct { + *mock.Call +} + +// SaveBatch is a helper method to define mock.On call +// - ctx context.Context +// - offset int64 +// - events []*indexer.ParsedEvent +func (_e *Store_Expecter) SaveBatch(ctx interface{}, offset interface{}, events interface{}) *Store_SaveBatch_Call { + return &Store_SaveBatch_Call{Call: _e.mock.On("SaveBatch", ctx, offset, events)} +} + +func (_c *Store_SaveBatch_Call) Run(run func(ctx context.Context, offset int64, events []*indexer.ParsedEvent)) *Store_SaveBatch_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int64), args[2].([]*indexer.ParsedEvent)) + }) + return _c +} + +func (_c *Store_SaveBatch_Call) Return(_a0 error) *Store_SaveBatch_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Store_SaveBatch_Call) RunAndReturn(run func(context.Context, int64, []*indexer.ParsedEvent) error) *Store_SaveBatch_Call { + _c.Call.Return(run) + return _c +} + +// UpsertToken provides a mock function with given fields: ctx, token +func (_m *Store) UpsertToken(ctx context.Context, token *indexer.Token) error { + ret := _m.Called(ctx, token) + + if len(ret) == 0 { + panic("no return value specified for UpsertToken") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *indexer.Token) error); ok { + r0 = rf(ctx, token) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Store_UpsertToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpsertToken' +type Store_UpsertToken_Call struct { + *mock.Call +} + +// UpsertToken is a helper method to define mock.On call +// - ctx context.Context +// - token *indexer.Token +func (_e *Store_Expecter) UpsertToken(ctx interface{}, token interface{}) *Store_UpsertToken_Call { + return &Store_UpsertToken_Call{Call: _e.mock.On("UpsertToken", ctx, token)} +} + +func (_c *Store_UpsertToken_Call) Run(run func(ctx context.Context, token *indexer.Token)) *Store_UpsertToken_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*indexer.Token)) + }) + return _c +} + +func (_c *Store_UpsertToken_Call) Return(_a0 error) *Store_UpsertToken_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Store_UpsertToken_Call) RunAndReturn(run func(context.Context, *indexer.Token) error) *Store_UpsertToken_Call { + _c.Call.Return(run) + return _c +} + +// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStore(t interface { + mock.TestingT + Cleanup(func()) +}) *Store { + mock := &Store{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/indexer/engine/processor.go b/pkg/indexer/engine/processor.go new file mode 100644 index 00000000..a50fc893 --- /dev/null +++ b/pkg/indexer/engine/processor.go @@ -0,0 +1,203 @@ +package engine + +import ( + "context" + "fmt" + "time" + + "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" + "github.com/chainsafe/canton-middleware/pkg/indexer" + + "go.uber.org/zap" +) + +var ( + processorRetryBaseDelay = 5 * time.Second + processorRetryMaxDelay = 60 * time.Second +) + +// EventFetcher is the interface the Processor uses to start and consume the ledger stream. +// +//go:generate mockery --name EventFetcher --output mocks --outpkg mocks --filename mock_event_fetcher.go --with-expecter +type EventFetcher interface { + // Start begins streaming from offset in a background goroutine. + // Must be called exactly once before Events is used. + Start(ctx context.Context, offset int64) + + // Events returns the read-only channel of decoded batches. + // The channel is closed when the stream terminates. + Events() <-chan *streaming.Batch[*indexer.ParsedEvent] +} + +// Store is the persistence contract for the Processor. Defined in pkg/indexer. 
+type Store = indexer.Store + +// Processor is the main run loop of the indexer. It wires the EventFetcher to the +// Store and writes decoded events atomically. +// +// Processing is sequential — one batch at a time. The ordering guarantee comes from +// the Canton ledger: transactions within a party's projection are delivered in +// strictly increasing offset order. +type Processor struct { + fetcher EventFetcher + store Store + logger *zap.Logger +} + +// NewProcessor creates a Processor. +func NewProcessor(fetcher EventFetcher, store Store, logger *zap.Logger) *Processor { + return &Processor{ + fetcher: fetcher, + store: store, + logger: logger, + } +} + +// Run starts the indexer loop. It blocks until ctx is canceled or the fetcher +// channel closes, then returns ctx.Err() or nil respectively. +// +// On startup Run loads the resume offset from the store and passes it to the fetcher, +// so callers do not need to track offsets themselves. +// +// If processBatch fails (store error) Run retries the same batch with exponential +// backoff (5s → 60s) until it succeeds or ctx is canceled. The offset is never +// advanced past a failed batch — no event is silently dropped. +func (p *Processor) Run(ctx context.Context) error { + offset, err := p.store.LatestOffset(ctx) + if err != nil { + return fmt.Errorf("load resume offset: %w", err) + } + + p.logger.Info("indexer processor starting", zap.Int64("resume_offset", offset)) + p.fetcher.Start(ctx, offset) + + for { + select { + case batch, ok := <-p.fetcher.Events(): + if !ok { + p.logger.Info("indexer stream closed") + return nil + } + if err := p.processBatchWithRetry(ctx, batch); err != nil { + // Only reachable when ctx is canceled. + return err + } + case <-ctx.Done(): + return ctx.Err() + } + } +} + +// processBatchWithRetry calls processBatch and retries with exponential backoff on failure. +// It returns only when the batch is successfully persisted or ctx is canceled. 
+func (p *Processor) processBatchWithRetry(ctx context.Context, batch *streaming.Batch[*indexer.ParsedEvent]) error { + delay := processorRetryBaseDelay + + for { + err := p.processBatch(ctx, batch) + if err == nil { + return nil + } + + p.logger.Error("failed to process batch, retrying", + zap.String("update_id", batch.UpdateID), + zap.Int64("offset", batch.Offset), + zap.Duration("backoff", delay), + zap.Error(err), + ) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(delay): + } + + delay = min(delay*2, processorRetryMaxDelay) + } +} + +// processBatch persists a single decoded batch inside a single database transaction. +// All writes — token upserts, supply/balance deltas, events, and offset advance — are +// committed atomically. On any error the transaction is rolled back and the caller retries. +func (p *Processor) processBatch(ctx context.Context, batch *streaming.Batch[*indexer.ParsedEvent]) error { + err := p.store.RunInTx(ctx, func(ctx context.Context, tx Store) error { + for _, e := range batch.Items { + if err := tx.UpsertToken(ctx, tokenFromEvent(e)); err != nil { + return fmt.Errorf("upsert token: %w", err) + } + + if admin, id, delta, ok := supplyDeltaFromEvent(e); ok { + if err := tx.ApplySupplyDelta(ctx, admin, id, delta); err != nil { + return fmt.Errorf("apply supply delta: %w", err) + } + } + + for _, u := range balanceUpdatesFromEvent(e) { + if err := tx.ApplyBalanceDelta(ctx, u[0], e.InstrumentAdmin, e.InstrumentID, u[1]); err != nil { + return fmt.Errorf("apply balance delta: %w", err) + } + } + } + + return tx.SaveBatch(ctx, batch.Offset, batch.Items) + }) + if err != nil { + return fmt.Errorf("tx at offset %d: %w", batch.Offset, err) + } + + if len(batch.Items) > 0 { + p.logger.Debug("indexed batch", + zap.String("update_id", batch.UpdateID), + zap.Int64("offset", batch.Offset), + zap.Int("events", len(batch.Items)), + ) + } + + return nil +} + +// tokenFromEvent constructs a Token from a ParsedEvent for 
UpsertToken. +// TotalSupply and HolderCount are left zero — the store initializes them on first +// insert and maintains them via ApplySupplyDelta / UpsertBalance thereafter. +func tokenFromEvent(e *indexer.ParsedEvent) *indexer.Token { + return &indexer.Token{ + InstrumentAdmin: e.InstrumentAdmin, + InstrumentID: e.InstrumentID, + Issuer: e.Issuer, + FirstSeenOffset: e.LedgerOffset, + FirstSeenAt: e.EffectiveTime, + } +} + +// supplyDeltaFromEvent returns the signed supply delta for MINT (+amount) and +// BURN (-amount). Returns ok=false for TRANSFER, which leaves total supply unchanged. +func supplyDeltaFromEvent(e *indexer.ParsedEvent) (instrumentAdmin, instrumentID, delta string, ok bool) { + switch e.EventType { + case indexer.EventMint: + return e.InstrumentAdmin, e.InstrumentID, e.Amount, true + case indexer.EventBurn: + return e.InstrumentAdmin, e.InstrumentID, "-" + e.Amount, true + default: + return "", "", "", false + } +} + +// balanceUpdatesFromEvent returns [partyID, signedDelta] pairs for each balance +// affected by an event. Mirrors supplyDeltaFromEvent but at the per-party level. 
+// +// MINT: toParty +amount +// BURN: fromParty −amount +// TRANSFER: fromParty −amount, toParty +amount +func balanceUpdatesFromEvent(e *indexer.ParsedEvent) [][2]string { + neg := "-" + e.Amount + switch e.EventType { + case indexer.EventMint: + return [][2]string{{*e.ToPartyID, e.Amount}} + case indexer.EventBurn: + return [][2]string{{*e.FromPartyID, neg}} + case indexer.EventTransfer: + return [][2]string{{*e.FromPartyID, neg}, {*e.ToPartyID, e.Amount}} + default: + return nil + } +} diff --git a/pkg/indexer/engine/processor_test.go b/pkg/indexer/engine/processor_test.go new file mode 100644 index 00000000..03301315 --- /dev/null +++ b/pkg/indexer/engine/processor_test.go @@ -0,0 +1,338 @@ +package engine + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" + "github.com/chainsafe/canton-middleware/pkg/indexer" + "github.com/chainsafe/canton-middleware/pkg/indexer/engine/mocks" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +// --------------------------------------------------------------------------- +// Test event / batch builders (reuse constants from decoder_test.go) +// --------------------------------------------------------------------------- + +func mintEvent() *indexer.ParsedEvent { + r := testRecipient + return &indexer.ParsedEvent{ + EventType: indexer.EventMint, + InstrumentID: testInstrumentID, + InstrumentAdmin: testInstrumentAdmin, + Issuer: testIssuer, + Amount: testAmount, + ToPartyID: &r, + ContractID: testContractID, + LedgerOffset: 1, + EffectiveTime: time.Unix(1_700_000_000, 0), + } +} + +func burnEvent() *indexer.ParsedEvent { + s := testSender + return &indexer.ParsedEvent{ + EventType: indexer.EventBurn, + InstrumentID: testInstrumentID, + InstrumentAdmin: testInstrumentAdmin, + Issuer: testIssuer, + Amount: testAmount, + FromPartyID: &s, + ContractID: testContractID, 
+ LedgerOffset: 2, + EffectiveTime: time.Unix(1_700_000_000, 0), + } +} + +func transferEventParsed() *indexer.ParsedEvent { + s := testSender + r := testRecipient + return &indexer.ParsedEvent{ + EventType: indexer.EventTransfer, + InstrumentID: testInstrumentID, + InstrumentAdmin: testInstrumentAdmin, + Issuer: testIssuer, + Amount: testAmount, + FromPartyID: &s, + ToPartyID: &r, + ContractID: testContractID, + LedgerOffset: 3, + EffectiveTime: time.Unix(1_700_000_000, 0), + } +} + +func makeProcBatch(offset int64, events ...*indexer.ParsedEvent) *streaming.Batch[*indexer.ParsedEvent] { + return &streaming.Batch[*indexer.ParsedEvent]{ + Offset: offset, + UpdateID: "update-" + string(rune('0'+offset)), + Items: events, + } +} + +// feedCh sends batches into a buffered channel and closes it. +func feedCh(batches ...*streaming.Batch[*indexer.ParsedEvent]) <-chan *streaming.Batch[*indexer.ParsedEvent] { + ch := make(chan *streaming.Batch[*indexer.ParsedEvent], len(batches)) + for _, b := range batches { + ch <- b + } + close(ch) + return ch +} + +// setupRunInTx wires RunInTx to immediately execute its callback with the mock store. +func setupRunInTx(store *mocks.Store) { + store.EXPECT().RunInTx(mock.Anything, mock.Anything). + RunAndReturn(func(ctx context.Context, fn func(context.Context, indexer.Store) error) error { + return fn(ctx, store) + }) +} + +// --------------------------------------------------------------------------- +// tokenFromEvent +// --------------------------------------------------------------------------- + +func TestTokenFromEvent(t *testing.T) { + e := mintEvent() + tok := tokenFromEvent(e) + + assert.Equal(t, testInstrumentAdmin, tok.InstrumentAdmin) + assert.Equal(t, testInstrumentID, tok.InstrumentID) + assert.Equal(t, testIssuer, tok.Issuer) + assert.Equal(t, int64(1), tok.FirstSeenOffset) + assert.Equal(t, time.Unix(1_700_000_000, 0), tok.FirstSeenAt) + // TotalSupply and HolderCount are left at zero — the store maintains them. 
+ assert.Empty(t, tok.TotalSupply) + assert.Equal(t, int64(0), tok.HolderCount) +} + +// --------------------------------------------------------------------------- +// supplyDeltaFromEvent +// --------------------------------------------------------------------------- + +func TestSupplyDeltaFromEvent_Mint(t *testing.T) { + _, _, delta, ok := supplyDeltaFromEvent(mintEvent()) + require.True(t, ok) + assert.Equal(t, testAmount, delta) +} + +func TestSupplyDeltaFromEvent_Burn(t *testing.T) { + _, _, delta, ok := supplyDeltaFromEvent(burnEvent()) + require.True(t, ok) + assert.Equal(t, "-"+testAmount, delta) +} + +func TestSupplyDeltaFromEvent_Transfer_NoOp(t *testing.T) { + instrumentAdmin, instrumentID, delta, ok := supplyDeltaFromEvent(transferEventParsed()) + assert.Empty(t, instrumentAdmin) + assert.Empty(t, instrumentID) + assert.Empty(t, delta) + assert.False(t, ok) +} + +// --------------------------------------------------------------------------- +// balanceUpdatesFromEvent +// --------------------------------------------------------------------------- + +func TestBalanceUpdatesFromEvent_Mint(t *testing.T) { + updates := balanceUpdatesFromEvent(mintEvent()) + require.Len(t, updates, 1) + assert.Equal(t, testRecipient, updates[0][0]) + assert.Equal(t, testAmount, updates[0][1]) +} + +func TestBalanceUpdatesFromEvent_Burn(t *testing.T) { + updates := balanceUpdatesFromEvent(burnEvent()) + require.Len(t, updates, 1) + assert.Equal(t, testSender, updates[0][0]) + assert.Equal(t, "-"+testAmount, updates[0][1]) +} + +func TestBalanceUpdatesFromEvent_Transfer(t *testing.T) { + updates := balanceUpdatesFromEvent(transferEventParsed()) + require.Len(t, updates, 2) + assert.Equal(t, testSender, updates[0][0]) + assert.Equal(t, "-"+testAmount, updates[0][1]) + assert.Equal(t, testRecipient, updates[1][0]) + assert.Equal(t, testAmount, updates[1][1]) +} + +// --------------------------------------------------------------------------- +// Processor.Run: startup / 
lifecycle +// --------------------------------------------------------------------------- + +func TestProcessor_Run_LoadOffsetError(t *testing.T) { + store := mocks.NewStore(t) + fetcher := mocks.NewEventFetcher(t) + loadErr := errors.New("db down") + + store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), loadErr) + + p := NewProcessor(fetcher, store, zap.NewNop()) + err := p.Run(context.Background()) + + require.Error(t, err) + assert.ErrorIs(t, err, loadErr) +} + +func TestProcessor_Run_StreamClosed_ReturnsNil(t *testing.T) { + store := mocks.NewStore(t) + fetcher := mocks.NewEventFetcher(t) + + store.EXPECT().LatestOffset(mock.Anything).Return(int64(5), nil) + fetcher.EXPECT().Start(mock.Anything, int64(5)) + fetcher.EXPECT().Events().Return(feedCh()) + + p := NewProcessor(fetcher, store, zap.NewNop()) + assert.NoError(t, p.Run(context.Background())) +} + +func TestProcessor_Run_ContextCancelled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + store := mocks.NewStore(t) + fetcher := mocks.NewEventFetcher(t) + + ch := make(chan *streaming.Batch[*indexer.ParsedEvent]) // never closed / sent + + store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) + fetcher.EXPECT().Start(mock.Anything, int64(0)) + fetcher.EXPECT().Events().Return((<-chan *streaming.Batch[*indexer.ParsedEvent])(ch)) + + p := NewProcessor(fetcher, store, zap.NewNop()) + + done := make(chan error, 1) + go func() { done <- p.Run(ctx) }() + + cancel() + assert.ErrorIs(t, <-done, context.Canceled) +} + +// --------------------------------------------------------------------------- +// Processor.Run: per-event-type store call verification +// --------------------------------------------------------------------------- + +func TestProcessor_Run_MintBatch(t *testing.T) { + store := mocks.NewStore(t) + fetcher := mocks.NewEventFetcher(t) + ev := mintEvent() + + store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) + 
fetcher.EXPECT().Start(mock.Anything, int64(0)) + fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(1, ev))) + + setupRunInTx(store) + store.EXPECT().UpsertToken(mock.Anything, tokenFromEvent(ev)).Return(nil) + store.EXPECT().ApplySupplyDelta(mock.Anything, testInstrumentAdmin, testInstrumentID, testAmount).Return(nil) + store.EXPECT().ApplyBalanceDelta(mock.Anything, testRecipient, testInstrumentAdmin, testInstrumentID, testAmount).Return(nil) + store.EXPECT().SaveBatch(mock.Anything, int64(1), []*indexer.ParsedEvent{ev}).Return(nil) + + require.NoError(t, NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) +} + +func TestProcessor_Run_BurnBatch(t *testing.T) { + store := mocks.NewStore(t) + fetcher := mocks.NewEventFetcher(t) + ev := burnEvent() + + store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) + fetcher.EXPECT().Start(mock.Anything, int64(0)) + fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(2, ev))) + + setupRunInTx(store) + store.EXPECT().UpsertToken(mock.Anything, tokenFromEvent(ev)).Return(nil) + store.EXPECT().ApplySupplyDelta(mock.Anything, testInstrumentAdmin, testInstrumentID, "-"+testAmount).Return(nil) + store.EXPECT().ApplyBalanceDelta(mock.Anything, testSender, testInstrumentAdmin, testInstrumentID, "-"+testAmount).Return(nil) + store.EXPECT().SaveBatch(mock.Anything, int64(2), []*indexer.ParsedEvent{ev}).Return(nil) + + require.NoError(t, NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) +} + +func TestProcessor_Run_TransferBatch(t *testing.T) { + store := mocks.NewStore(t) + fetcher := mocks.NewEventFetcher(t) + ev := transferEventParsed() + + store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) + fetcher.EXPECT().Start(mock.Anything, int64(0)) + fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(3, ev))) + + setupRunInTx(store) + store.EXPECT().UpsertToken(mock.Anything, tokenFromEvent(ev)).Return(nil) + // Transfer: no supply delta. 
+ store.EXPECT().ApplyBalanceDelta(mock.Anything, testSender, testInstrumentAdmin, testInstrumentID, "-"+testAmount).Return(nil) + store.EXPECT().ApplyBalanceDelta(mock.Anything, testRecipient, testInstrumentAdmin, testInstrumentID, testAmount).Return(nil) + store.EXPECT().SaveBatch(mock.Anything, int64(3), []*indexer.ParsedEvent{ev}).Return(nil) + + require.NoError(t, NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) +} + +func TestProcessor_Run_EmptyBatch_AdvancesOffset(t *testing.T) { + store := mocks.NewStore(t) + fetcher := mocks.NewEventFetcher(t) + + store.EXPECT().LatestOffset(mock.Anything).Return(int64(9), nil) + fetcher.EXPECT().Start(mock.Anything, int64(9)) + fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(10))) + + setupRunInTx(store) + // No UpsertToken, ApplySupplyDelta, or ApplyBalanceDelta calls. + store.EXPECT().SaveBatch(mock.Anything, int64(10), ([]*indexer.ParsedEvent)(nil)).Return(nil) + + require.NoError(t, NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) +} + +// --------------------------------------------------------------------------- +// Processor.Run: retry on transient store error +// --------------------------------------------------------------------------- + +func TestProcessor_Run_ProcessBatch_StoreError_Retries(t *testing.T) { + processorRetryBaseDelay = time.Millisecond + defer func() { processorRetryBaseDelay = 5 * time.Second }() + + store := mocks.NewStore(t) + fetcher := mocks.NewEventFetcher(t) + + store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) + fetcher.EXPECT().Start(mock.Anything, int64(0)) + fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(1))) + + // First attempt fails. + store.EXPECT().RunInTx(mock.Anything, mock.Anything). + Return(errors.New("transient db error")).Once() + // Second attempt succeeds. + store.EXPECT().RunInTx(mock.Anything, mock.Anything). 
+ RunAndReturn(func(ctx context.Context, fn func(context.Context, indexer.Store) error) error { + return fn(ctx, store) + }).Once() + store.EXPECT().SaveBatch(mock.Anything, int64(1), ([]*indexer.ParsedEvent)(nil)).Return(nil) + + require.NoError(t, NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) +} + +func TestProcessor_Run_ContextCancelledDuringRetry(t *testing.T) { + processorRetryBaseDelay = time.Hour // effectively infinite + defer func() { processorRetryBaseDelay = 5 * time.Second }() + + ctx, cancel := context.WithCancel(context.Background()) + store := mocks.NewStore(t) + fetcher := mocks.NewEventFetcher(t) + + store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) + fetcher.EXPECT().Start(mock.Anything, int64(0)) + fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(1))) + + // Always fail; cancel immediately so the retry wait is interrupted. + store.EXPECT().RunInTx(mock.Anything, mock.Anything). + RunAndReturn(func(_ context.Context, _ func(context.Context, indexer.Store) error) error { + cancel() + return errors.New("persistent db error") + }) + + err := NewProcessor(fetcher, store, zap.NewNop()).Run(ctx) + assert.ErrorIs(t, err, context.Canceled) +} diff --git a/pkg/indexer/parser.go b/pkg/indexer/parser.go deleted file mode 100644 index 3d21d1f5..00000000 --- a/pkg/indexer/parser.go +++ /dev/null @@ -1,167 +0,0 @@ -package indexer - -import ( - "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" - - "go.uber.org/zap" -) - -const ( - tokenTransferEventModule = "CIP56.Events" - tokenTransferEventEntity = "TokenTransferEvent" - - // Metadata keys for bridge context stored in TokenTransferEvent.meta.values. - metaKeyExternalTxID = "bridge.externalTxId" - metaKeyExternalAddress = "bridge.externalAddress" - metaKeyFingerprint = "bridge.fingerprint" -) - -// Parser decodes streaming.LedgerTransactions into ParsedEvents. -// -// Filtering operates at two distinct layers: -// -// 1. 
gRPC (template-level): the Fetcher subscribes to CIP56.Events.TokenTransferEvent -// via TemplateID, reducing network traffic to only that contract type. This is done -// at the Canton Ledger API level and cannot filter by instrument payload. -// PackageID="" in the TemplateID enables all-packages mode, so any third-party -// CIP56-compliant token is automatically included at this layer. -// -// 2. App-level (instrument-level): the Parser further filters by InstrumentKey{Admin, ID}. -// This is necessary because the gRPC API cannot filter by contract field values. -// InstrumentKey is the Canton equivalent of an ERC-20 contract address — it uniquely -// identifies a specific token deployment by its issuer party and token identifier. -type Parser struct { - mode FilterMode - allowedInstruments map[InstrumentKey]struct{} - logger *zap.Logger -} - -// NewParser creates a new Parser. -// -// - mode: FilterModeAll or FilterModeWhitelist. -// - allowedInstruments: InstrumentKeys to accept (Canton equivalent of ERC-20 contract addresses). -// Each key is {Admin: issuerPartyID, ID: tokenID}. Both fields must match. -// Ignored when mode is FilterModeAll. -// - logger: caller-provided logger. -func NewParser(mode FilterMode, allowedInstruments []InstrumentKey, logger *zap.Logger) *Parser { - allowed := make(map[InstrumentKey]struct{}, len(allowedInstruments)) - for _, k := range allowedInstruments { - allowed[k] = struct{}{} - } - return &Parser{ - mode: mode, - allowedInstruments: allowed, - logger: logger, - } -} - -// Parse extracts and decodes all TokenTransferEvent created-events from tx. -// Returns one ParsedEvent per matched event; events that do not match the template, -// fail the instrument filter, or contain an invalid party combination are dropped. 
-func (p *Parser) Parse(tx *streaming.LedgerTransaction) []*ParsedEvent { - out := make([]*ParsedEvent, 0, len(tx.Events)) - - for _, ev := range tx.Events { - if !ev.IsCreated { - continue // archived events carry no field data — nothing to index - } - if ev.ModuleName != tokenTransferEventModule || ev.TemplateName != tokenTransferEventEntity { - continue - } - - instrumentID := ev.NestedTextField("instrumentId", "id") - instrumentAdmin := ev.NestedPartyField("instrumentId", "admin") - key := InstrumentKey{Admin: instrumentAdmin, ID: instrumentID} - - if !p.instrumentAllowed(key) { - p.logger.Debug("skipping event for unlisted instrument", - zap.String("instrument_id", instrumentID), - zap.String("instrument_admin", instrumentAdmin), - zap.String("contract_id", ev.ContractID), - ) - continue - } - - pe := p.decode(tx, ev, instrumentID) - if pe == nil { - continue - } - out = append(out, pe) - } - - return out -} - -// decode converts a single TokenTransferEvent LedgerEvent into a ParsedEvent. -// Returns nil when the event contains an invalid party combination (both absent). 
-func (p *Parser) decode(tx *streaming.LedgerTransaction, ev *streaming.LedgerEvent, instrumentID string) *ParsedEvent { - fromPartyID := optionalParty(ev, "fromParty") - toPartyID := optionalParty(ev, "toParty") - - var et EventType - switch { - case fromPartyID == nil && toPartyID != nil: - et = EventMint - case fromPartyID != nil && toPartyID == nil: - et = EventBurn - case fromPartyID != nil && toPartyID != nil: - et = EventTransfer - default: - p.logger.Warn("dropping TokenTransferEvent with both parties absent", - zap.String("contract_id", ev.ContractID), - zap.String("tx_id", tx.UpdateID), - zap.String("instrument_id", instrumentID), - ) - return nil - } - - return &ParsedEvent{ - InstrumentID: instrumentID, - InstrumentAdmin: ev.NestedPartyField("instrumentId", "admin"), - Issuer: ev.PartyField("issuer"), - EventType: et, - Amount: ev.NumericField("amount"), - FromPartyID: fromPartyID, - ToPartyID: toPartyID, - ExternalTxID: optionalMeta(ev, metaKeyExternalTxID), - ExternalAddress: optionalMeta(ev, metaKeyExternalAddress), - Fingerprint: optionalMeta(ev, metaKeyFingerprint), - ContractID: ev.ContractID, - TxID: tx.UpdateID, - LedgerOffset: tx.Offset, - Timestamp: ev.TimestampField("timestamp"), - EffectiveTime: tx.EffectiveTime, - } -} - -// instrumentAllowed returns true when the InstrumentKey passes the filter. -func (p *Parser) instrumentAllowed(key InstrumentKey) bool { - if p.mode == FilterModeAll { - return true - } - _, ok := p.allowedInstruments[key] - return ok -} - -// optionalParty extracts a DAML Optional Party field as *string. -// Returns nil when the field is None. -func optionalParty(ev *streaming.LedgerEvent, name string) *string { - if ev.IsNone(name) { - return nil - } - v := ev.OptionalPartyField(name) - if v == "" { - return nil - } - return &v -} - -// optionalMeta looks up a bridge metadata key and returns a *string. -// Returns nil when meta is None or the key is absent. 
-func optionalMeta(ev *streaming.LedgerEvent, key string) *string { - v := ev.OptionalMetaLookup("meta", key) - if v == "" { - return nil - } - return &v -} diff --git a/pkg/indexer/store.go b/pkg/indexer/store.go new file mode 100644 index 00000000..aa246644 --- /dev/null +++ b/pkg/indexer/store.go @@ -0,0 +1,47 @@ +package indexer + +import "context" + +// Store defines the persistence contract for the indexer Processor. +// +// The key invariant: offset and events from the same LedgerTransaction must be +// written atomically. This guarantees that after a restart the processor resumes +// from a consistent point — no event is lost and no event is double-written. +// +// The Bun-backed implementation lives in pkg/indexer/store. +// +//go:generate mockery --name Store --output engine/mocks --outpkg mocks --filename mock_store.go --with-expecter +type Store interface { + // LatestOffset returns the last successfully persisted ledger offset. + // Returns 0 and no error when no offset has been stored yet (fresh start). + // Called once at startup, outside any transaction. + LatestOffset(ctx context.Context) (int64, error) + + // RunInTx executes fn inside a single database transaction. + // On success fn's return value is nil and the transaction is committed. + // On any error the transaction is rolled back and the error is returned. + // The Store passed to fn is scoped to the transaction — all methods on it + // participate in the same underlying DB transaction. + RunInTx(ctx context.Context, fn func(ctx context.Context, tx Store) error) error + + // SaveBatch persists a batch of ParsedEvents and advances the stored ledger offset. + // Duplicate events (same ContractID) are silently skipped via ON CONFLICT DO NOTHING. + // When events is empty the offset is still advanced to skip no-op transactions on restart. + SaveBatch(ctx context.Context, offset int64, events []*ParsedEvent) error + + // UpsertToken records a token deployment on first observation. 
+ // Subsequent calls for the same {InstrumentAdmin, InstrumentID} are no-ops + // (ON CONFLICT DO NOTHING). + UpsertToken(ctx context.Context, token *Token) error + + // ApplyBalanceDelta adjusts a party's token balance by delta (signed decimal string). + // The balance row is created at zero if it does not yet exist, then delta is added. + // The store must also update Token.HolderCount atomically: + // - increment when a party's balance transitions from zero to positive + // - decrement when a party's balance transitions from positive to zero + ApplyBalanceDelta(ctx context.Context, partyID, instrumentAdmin, instrumentID, delta string) error + + // ApplySupplyDelta adjusts a token's TotalSupply by delta (signed decimal string). + // Called once per mint (+amount) or burn (-amount). Transfer events must not call this. + ApplySupplyDelta(ctx context.Context, instrumentAdmin, instrumentID, delta string) error +} diff --git a/pkg/indexer/types.go b/pkg/indexer/types.go index 12e3fc5e..5372fee7 100644 --- a/pkg/indexer/types.go +++ b/pkg/indexer/types.go @@ -71,6 +71,53 @@ type InstrumentKey struct { ID string // instrumentId.id — the token identifier (e.g. "DEMO") } +// Token represents a CIP56 token deployment, uniquely identified by {InstrumentAdmin, InstrumentID}. +// A Token record is created the first time the indexer observes a TokenTransferEvent for a given +// instrument pair. It tracks the ERC-20-equivalent on-chain state derivable from transfer events. +// +// ERC-20 parallel: +// +// symbol() → InstrumentID +// owner/minter → InstrumentAdmin, Issuer +// totalSupply() → TotalSupply (maintained: +amount on MINT, -amount on BURN) +// HolderCount (non-standard but shown on all block explorers) +type Token struct { + // Identity — canonical composite key. + InstrumentAdmin string // instrumentId.admin — token admin/issuer party (ERC-20: deployer) + InstrumentID string // instrumentId.id — token symbol/identifier (ERC-20: symbol, e.g. "DEMO") + + // Roles. 
+	Issuer string // issuer party on the TokenTransferEvent contract (ERC-20: minter role)
+
+	// Supply (ERC-20: totalSupply()).
+	// Running total, always ≥ 0. Incremented by each MINT amount, decremented by each BURN amount.
+	// Updated atomically with every mint/burn via Store.ApplySupplyDelta.
+	TotalSupply string // decimal string, e.g. "1000000.000000000000000000"
+
+	// Holders (ERC-20: no standard equivalent, but a standard block-explorer metric).
+	// Count of distinct parties currently holding a non-zero balance.
+	// The store increments this when a balance first becomes positive, decrements when it returns to zero.
+	HolderCount int64
+
+	// Provenance.
+	FirstSeenOffset int64     // ledger offset when this token was first indexed
+	FirstSeenAt     time.Time // ledger effective time when this token was first indexed
+}
+
+// Balance is a party's current token holding for a specific instrument.
+// (ERC-20: the per-address entry in the balances mapping, i.e. balanceOf(address).)
+//
+// Amount is a non-negative decimal string representing the live balance,
+// e.g. "1500.000000000000000000". It is maintained in the database by
+// Store.ApplyBalanceDelta, which adds each event's signed delta to the
+// stored balance (the row is created at zero if it does not yet exist).
+type Balance struct {
+	PartyID         string // canton party (ERC-20: address)
+	InstrumentAdmin string // instrumentId.admin
+	InstrumentID    string // instrumentId.id
+	Amount          string // current balance, decimal string ≥ 0
+}
+
+// FilterMode controls which token instruments the Parser processes.
type FilterMode int From 3360856bc4df042b585bf604e67594bfc003bebe Mon Sep 17 00:00:00 2001 From: sadiq1971 Date: Wed, 18 Mar 2026 18:04:17 +0600 Subject: [PATCH 3/5] improved impl --- pkg/indexer/engine/decoder.go | 2 +- pkg/indexer/engine/export_test.go | 10 + pkg/indexer/engine/fetcher.go | 58 ++---- .../engine/mocks/mock_event_fetcher.go | 11 +- pkg/indexer/engine/mocks/mock_store.go | 41 +--- pkg/indexer/engine/processor.go | 52 ++++- pkg/indexer/engine/processor_test.go | 187 +++++++----------- pkg/indexer/store.go | 47 ----- pkg/indexer/types.go | 5 +- 9 files changed, 160 insertions(+), 253 deletions(-) create mode 100644 pkg/indexer/engine/export_test.go delete mode 100644 pkg/indexer/store.go diff --git a/pkg/indexer/engine/decoder.go b/pkg/indexer/engine/decoder.go index 53e97372..aef78533 100644 --- a/pkg/indexer/engine/decoder.go +++ b/pkg/indexer/engine/decoder.go @@ -108,7 +108,7 @@ func NewTokenTransferDecoder( return &indexer.ParsedEvent{ InstrumentID: instrumentID, - InstrumentAdmin: ev.NestedPartyField("instrumentId", "admin"), + InstrumentAdmin: instrumentAdmin, Issuer: ev.PartyField("issuer"), EventType: et, Amount: ev.NumericField("amount"), diff --git a/pkg/indexer/engine/export_test.go b/pkg/indexer/engine/export_test.go new file mode 100644 index 00000000..9e8e6a7f --- /dev/null +++ b/pkg/indexer/engine/export_test.go @@ -0,0 +1,10 @@ +package engine + +import "time" + +// SetRetryBaseDelay overrides processorRetryBaseDelay for the duration of a test. 
+func SetRetryBaseDelay(t interface{ Cleanup(func()) }, d time.Duration) { + orig := processorRetryBaseDelay + processorRetryBaseDelay = d + t.Cleanup(func() { processorRetryBaseDelay = orig }) +} diff --git a/pkg/indexer/engine/fetcher.go b/pkg/indexer/engine/fetcher.go index 498424e7..62d816b0 100644 --- a/pkg/indexer/engine/fetcher.go +++ b/pkg/indexer/engine/fetcher.go @@ -2,6 +2,7 @@ package engine import ( "context" + "sync" "sync/atomic" "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" @@ -10,21 +11,20 @@ import ( "go.uber.org/zap" ) -const txChannelCap = 100 - // Fetcher opens a live Canton stream from a caller-supplied resume offset and // exposes the resulting batches via Events. // // Typical usage: // -// decode := indexer.NewTokenTransferDecoder(mode, allowed, logger) -// f := indexer.NewFetcher(streamClient, templateID, decode, logger) +// decode := engine.NewTokenTransferDecoder(mode, allowed, logger) +// f := engine.NewFetcher(streamClient, templateID, decode, logger) // f.Start(ctx, lastProcessedOffset) // for batch := range f.Events() { ... } type Fetcher struct { stream *streaming.Stream[*indexer.ParsedEvent] templateID streaming.TemplateID - out chan *streaming.Batch[*indexer.ParsedEvent] + out <-chan *streaming.Batch[*indexer.ParsedEvent] + once sync.Once logger *zap.Logger } @@ -43,47 +43,29 @@ func NewFetcher( return &Fetcher{ stream: streaming.NewStream(streamer, decode), templateID: templateID, - out: make(chan *streaming.Batch[*indexer.ParsedEvent], txChannelCap), logger: logger, } } -// Start begins streaming from offset in a background goroutine. It is non-blocking. -// The goroutine exits when ctx is canceled or the underlying stream closes. +// Start begins streaming from offset. It is non-blocking; the underlying goroutine +// exits when ctx is canceled or the stream closes. // -// Start must be called exactly once before Events is used. +// Start must be called exactly once before Events is used. 
Subsequent calls are no-ops. func (f *Fetcher) Start(ctx context.Context, offset int64) { - f.logger.Info("fetcher starting", zap.Int64("resume_offset", offset)) - - // lastOffset is updated atomically by the streaming.Client goroutine as - // transactions arrive, and read back by its reconnect loop on each new - // connection attempt, ensuring exactly-once resumption from the right point. - var lastOffset int64 - atomic.StoreInt64(&lastOffset, offset) + f.once.Do(func() { + f.logger.Info("fetcher starting", zap.Int64("resume_offset", offset)) - batchCh := f.stream.Subscribe(ctx, streaming.SubscribeRequest{ - FromOffset: offset, - TemplateIDs: []streaming.TemplateID{f.templateID}, - }, &lastOffset) + // lastOffset is updated atomically by the streaming.Client goroutine as + // transactions arrive, and read back by its reconnect loop on each new + // connection attempt, ensuring exactly-once resumption from the right point. + var lastOffset int64 + atomic.StoreInt64(&lastOffset, offset) - go func() { - defer close(f.out) - for { - select { - case batch, ok := <-batchCh: - if !ok { - return - } - select { - case f.out <- batch: - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } - }() + f.out = f.stream.Subscribe(ctx, streaming.SubscribeRequest{ + FromOffset: offset, + TemplateIDs: []streaming.TemplateID{f.templateID}, + }, &lastOffset) + }) } // Events returns the read-only channel of decoded batches. 
diff --git a/pkg/indexer/engine/mocks/mock_event_fetcher.go b/pkg/indexer/engine/mocks/mock_event_fetcher.go index 76757f06..ef0d08b3 100644 --- a/pkg/indexer/engine/mocks/mock_event_fetcher.go +++ b/pkg/indexer/engine/mocks/mock_event_fetcher.go @@ -45,12 +45,10 @@ func (_m *EventFetcher) Events() <-chan *streaming.Batch[*indexer.ParsedEvent] { return r0 } -// EventFetcher_Events_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Events' type EventFetcher_Events_Call struct { *mock.Call } -// Events is a helper method to define mock.On call func (_e *EventFetcher_Expecter) Events() *EventFetcher_Events_Call { return &EventFetcher_Events_Call{Call: _e.mock.On("Events")} } @@ -77,14 +75,10 @@ func (_m *EventFetcher) Start(ctx context.Context, offset int64) { _m.Called(ctx, offset) } -// EventFetcher_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' type EventFetcher_Start_Call struct { *mock.Call } -// Start is a helper method to define mock.On call -// - ctx context.Context -// - offset int64 func (_e *EventFetcher_Expecter) Start(ctx interface{}, offset interface{}) *EventFetcher_Start_Call { return &EventFetcher_Start_Call{Call: _e.mock.On("Start", ctx, offset)} } @@ -102,12 +96,13 @@ func (_c *EventFetcher_Start_Call) Return() *EventFetcher_Start_Call { } func (_c *EventFetcher_Start_Call) RunAndReturn(run func(context.Context, int64)) *EventFetcher_Start_Call { - _c.Run(run) + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int64)) + }) return _c } // NewEventFetcher creates a new instance of EventFetcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
func NewEventFetcher(t interface { mock.TestingT Cleanup(func()) diff --git a/pkg/indexer/engine/mocks/mock_store.go b/pkg/indexer/engine/mocks/mock_store.go index e5b1bf99..e8a107cb 100644 --- a/pkg/indexer/engine/mocks/mock_store.go +++ b/pkg/indexer/engine/mocks/mock_store.go @@ -6,6 +6,7 @@ import ( context "context" indexer "github.com/chainsafe/canton-middleware/pkg/indexer" + engine "github.com/chainsafe/canton-middleware/pkg/indexer/engine" mock "github.com/stretchr/testify/mock" ) @@ -41,17 +42,10 @@ func (_m *Store) ApplyBalanceDelta(ctx context.Context, partyID string, instrume return r0 } -// Store_ApplyBalanceDelta_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ApplyBalanceDelta' type Store_ApplyBalanceDelta_Call struct { *mock.Call } -// ApplyBalanceDelta is a helper method to define mock.On call -// - ctx context.Context -// - partyID string -// - instrumentAdmin string -// - instrumentID string -// - delta string func (_e *Store_Expecter) ApplyBalanceDelta(ctx interface{}, partyID interface{}, instrumentAdmin interface{}, instrumentID interface{}, delta interface{}) *Store_ApplyBalanceDelta_Call { return &Store_ApplyBalanceDelta_Call{Call: _e.mock.On("ApplyBalanceDelta", ctx, partyID, instrumentAdmin, instrumentID, delta)} } @@ -91,16 +85,10 @@ func (_m *Store) ApplySupplyDelta(ctx context.Context, instrumentAdmin string, i return r0 } -// Store_ApplySupplyDelta_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ApplySupplyDelta' type Store_ApplySupplyDelta_Call struct { *mock.Call } -// ApplySupplyDelta is a helper method to define mock.On call -// - ctx context.Context -// - instrumentAdmin string -// - instrumentID string -// - delta string func (_e *Store_Expecter) ApplySupplyDelta(ctx interface{}, instrumentAdmin interface{}, instrumentID interface{}, delta interface{}) *Store_ApplySupplyDelta_Call { return &Store_ApplySupplyDelta_Call{Call: 
_e.mock.On("ApplySupplyDelta", ctx, instrumentAdmin, instrumentID, delta)} } @@ -150,13 +138,10 @@ func (_m *Store) LatestOffset(ctx context.Context) (int64, error) { return r0, r1 } -// Store_LatestOffset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestOffset' type Store_LatestOffset_Call struct { *mock.Call } -// LatestOffset is a helper method to define mock.On call -// - ctx context.Context func (_e *Store_Expecter) LatestOffset(ctx interface{}) *Store_LatestOffset_Call { return &Store_LatestOffset_Call{Call: _e.mock.On("LatestOffset", ctx)} } @@ -179,7 +164,7 @@ func (_c *Store_LatestOffset_Call) RunAndReturn(run func(context.Context) (int64 } // RunInTx provides a mock function with given fields: ctx, fn -func (_m *Store) RunInTx(ctx context.Context, fn func(context.Context, indexer.Store) error) error { +func (_m *Store) RunInTx(ctx context.Context, fn func(context.Context, engine.Store) error) error { ret := _m.Called(ctx, fn) if len(ret) == 0 { @@ -187,7 +172,7 @@ func (_m *Store) RunInTx(ctx context.Context, fn func(context.Context, indexer.S } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, func(context.Context, indexer.Store) error) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, func(context.Context, engine.Store) error) error); ok { r0 = rf(ctx, fn) } else { r0 = ret.Error(0) @@ -196,21 +181,17 @@ func (_m *Store) RunInTx(ctx context.Context, fn func(context.Context, indexer.S return r0 } -// Store_RunInTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RunInTx' type Store_RunInTx_Call struct { *mock.Call } -// RunInTx is a helper method to define mock.On call -// - ctx context.Context -// - fn func(context.Context , indexer.Store) error func (_e *Store_Expecter) RunInTx(ctx interface{}, fn interface{}) *Store_RunInTx_Call { return &Store_RunInTx_Call{Call: _e.mock.On("RunInTx", ctx, fn)} } -func (_c *Store_RunInTx_Call) Run(run 
func(ctx context.Context, fn func(context.Context, indexer.Store) error)) *Store_RunInTx_Call { +func (_c *Store_RunInTx_Call) Run(run func(ctx context.Context, fn func(context.Context, engine.Store) error)) *Store_RunInTx_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(func(context.Context, indexer.Store) error)) + run(args[0].(context.Context), args[1].(func(context.Context, engine.Store) error)) }) return _c } @@ -220,7 +201,7 @@ func (_c *Store_RunInTx_Call) Return(_a0 error) *Store_RunInTx_Call { return _c } -func (_c *Store_RunInTx_Call) RunAndReturn(run func(context.Context, func(context.Context, indexer.Store) error) error) *Store_RunInTx_Call { +func (_c *Store_RunInTx_Call) RunAndReturn(run func(context.Context, func(context.Context, engine.Store) error) error) *Store_RunInTx_Call { _c.Call.Return(run) return _c } @@ -243,15 +224,10 @@ func (_m *Store) SaveBatch(ctx context.Context, offset int64, events []*indexer. return r0 } -// Store_SaveBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBatch' type Store_SaveBatch_Call struct { *mock.Call } -// SaveBatch is a helper method to define mock.On call -// - ctx context.Context -// - offset int64 -// - events []*indexer.ParsedEvent func (_e *Store_Expecter) SaveBatch(ctx interface{}, offset interface{}, events interface{}) *Store_SaveBatch_Call { return &Store_SaveBatch_Call{Call: _e.mock.On("SaveBatch", ctx, offset, events)} } @@ -291,14 +267,10 @@ func (_m *Store) UpsertToken(ctx context.Context, token *indexer.Token) error { return r0 } -// Store_UpsertToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpsertToken' type Store_UpsertToken_Call struct { *mock.Call } -// UpsertToken is a helper method to define mock.On call -// - ctx context.Context -// - token *indexer.Token func (_e *Store_Expecter) UpsertToken(ctx interface{}, token interface{}) *Store_UpsertToken_Call 
{ return &Store_UpsertToken_Call{Call: _e.mock.On("UpsertToken", ctx, token)} } @@ -321,7 +293,6 @@ func (_c *Store_UpsertToken_Call) RunAndReturn(run func(context.Context, *indexe } // NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. func NewStore(t interface { mock.TestingT Cleanup(func()) diff --git a/pkg/indexer/engine/processor.go b/pkg/indexer/engine/processor.go index a50fc893..8a987e3c 100644 --- a/pkg/indexer/engine/processor.go +++ b/pkg/indexer/engine/processor.go @@ -29,8 +29,47 @@ type EventFetcher interface { Events() <-chan *streaming.Batch[*indexer.ParsedEvent] } -// Store is the persistence contract for the Processor. Defined in pkg/indexer. -type Store = indexer.Store +// Store defines the persistence contract for the indexer Processor. +// +// The key invariant: offset and events from the same LedgerTransaction must be +// written atomically. This guarantees that after a restart the processor resumes +// from a consistent point — no event is lost and no event is double-written. +// +//go:generate mockery --name Store --output mocks --outpkg mocks --filename mock_store.go --with-expecter +type Store interface { + // LatestOffset returns the last successfully persisted ledger offset. + // Returns 0 and no error when no offset has been stored yet (fresh start). + // Called once at startup, outside any transaction. + LatestOffset(ctx context.Context) (int64, error) + + // RunInTx executes fn inside a single database transaction. + // On success fn's return value is nil and the transaction is committed. + // On any error the transaction is rolled back and the error is returned. + // The Store passed to fn is scoped to the transaction — all methods on it + // participate in the same underlying DB transaction. 
+ RunInTx(ctx context.Context, fn func(ctx context.Context, tx Store) error) error + + // SaveBatch persists a batch of ParsedEvents and advances the stored ledger offset. + // Duplicate events (same ContractID) are silently skipped via ON CONFLICT DO NOTHING. + // When events is empty the offset is still advanced to skip no-op transactions on restart. + SaveBatch(ctx context.Context, offset int64, events []*indexer.ParsedEvent) error + + // UpsertToken records a token deployment on first observation. + // Subsequent calls for the same {InstrumentAdmin, InstrumentID} are no-ops + // (ON CONFLICT DO NOTHING). + UpsertToken(ctx context.Context, token *indexer.Token) error + + // ApplyBalanceDelta adjusts a party's token balance by delta (signed decimal string). + // The balance row is created at zero if it does not yet exist, then delta is added. + // The store must also update Token.HolderCount atomically: + // - increment when a party's balance transitions from zero to positive + // - decrement when a party's balance transitions from positive to zero + ApplyBalanceDelta(ctx context.Context, partyID, instrumentAdmin, instrumentID, delta string) error + + // ApplySupplyDelta adjusts a token's TotalSupply by delta (signed decimal string). + // Called once per mint (+amount) or burn (-amount). Transfer events must not call this. + ApplySupplyDelta(ctx context.Context, instrumentAdmin, instrumentID, delta string) error +} // Processor is the main run loop of the indexer. It wires the EventFetcher to the // Store and writes decoded events atomically. 
@@ -192,10 +231,19 @@ func balanceUpdatesFromEvent(e *indexer.ParsedEvent) [][2]string { neg := "-" + e.Amount switch e.EventType { case indexer.EventMint: + if e.ToPartyID == nil { + return nil + } return [][2]string{{*e.ToPartyID, e.Amount}} case indexer.EventBurn: + if e.FromPartyID == nil { + return nil + } return [][2]string{{*e.FromPartyID, neg}} case indexer.EventTransfer: + if e.FromPartyID == nil || e.ToPartyID == nil { + return nil + } return [][2]string{{*e.FromPartyID, neg}, {*e.ToPartyID, e.Amount}} default: return nil diff --git a/pkg/indexer/engine/processor_test.go b/pkg/indexer/engine/processor_test.go index 03301315..d79b9406 100644 --- a/pkg/indexer/engine/processor_test.go +++ b/pkg/indexer/engine/processor_test.go @@ -1,4 +1,4 @@ -package engine +package engine_test import ( "context" @@ -6,9 +6,11 @@ import ( "testing" "time" + "github.com/chainsafe/canton-middleware/pkg/indexer/engine" + "github.com/chainsafe/canton-middleware/pkg/indexer/engine/mocks" + "github.com/chainsafe/canton-middleware/pkg/cantonsdk/streaming" "github.com/chainsafe/canton-middleware/pkg/indexer" - "github.com/chainsafe/canton-middleware/pkg/indexer/engine/mocks" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -16,8 +18,18 @@ import ( "go.uber.org/zap" ) +const ( + testInstrumentID = "DEMO" + testInstrumentAdmin = "issuer-party::abc123" + testIssuer = "issuer-party::abc123" + testAmount = "100.000000000000000000" + testRecipient = "recipient-party::def456" + testSender = "sender-party::ghi789" + testContractID = "contract-id-1" +) + // --------------------------------------------------------------------------- -// Test event / batch builders (reuse constants from decoder_test.go) +// Builders // --------------------------------------------------------------------------- func mintEvent() *indexer.ParsedEvent { @@ -50,7 +62,7 @@ func burnEvent() *indexer.ParsedEvent { } } -func transferEventParsed() *indexer.ParsedEvent { +func transferEvent() 
*indexer.ParsedEvent { s := testSender r := testRecipient return &indexer.ParsedEvent{ @@ -67,7 +79,7 @@ func transferEventParsed() *indexer.ParsedEvent { } } -func makeProcBatch(offset int64, events ...*indexer.ParsedEvent) *streaming.Batch[*indexer.ParsedEvent] { +func makeBatch(offset int64, events ...*indexer.ParsedEvent) *streaming.Batch[*indexer.ParsedEvent] { return &streaming.Batch[*indexer.ParsedEvent]{ Offset: offset, UpdateID: "update-" + string(rune('0'+offset)), @@ -75,7 +87,6 @@ func makeProcBatch(offset int64, events ...*indexer.ParsedEvent) *streaming.Batc } } -// feedCh sends batches into a buffered channel and closes it. func feedCh(batches ...*streaming.Batch[*indexer.ParsedEvent]) <-chan *streaming.Batch[*indexer.ParsedEvent] { ch := make(chan *streaming.Batch[*indexer.ParsedEvent], len(batches)) for _, b := range batches { @@ -85,85 +96,15 @@ func feedCh(batches ...*streaming.Batch[*indexer.ParsedEvent]) <-chan *streaming return ch } -// setupRunInTx wires RunInTx to immediately execute its callback with the mock store. func setupRunInTx(store *mocks.Store) { store.EXPECT().RunInTx(mock.Anything, mock.Anything). - RunAndReturn(func(ctx context.Context, fn func(context.Context, indexer.Store) error) error { + RunAndReturn(func(ctx context.Context, fn func(context.Context, engine.Store) error) error { return fn(ctx, store) }) } // --------------------------------------------------------------------------- -// tokenFromEvent -// --------------------------------------------------------------------------- - -func TestTokenFromEvent(t *testing.T) { - e := mintEvent() - tok := tokenFromEvent(e) - - assert.Equal(t, testInstrumentAdmin, tok.InstrumentAdmin) - assert.Equal(t, testInstrumentID, tok.InstrumentID) - assert.Equal(t, testIssuer, tok.Issuer) - assert.Equal(t, int64(1), tok.FirstSeenOffset) - assert.Equal(t, time.Unix(1_700_000_000, 0), tok.FirstSeenAt) - // TotalSupply and HolderCount are left at zero — the store maintains them. 
- assert.Empty(t, tok.TotalSupply) - assert.Equal(t, int64(0), tok.HolderCount) -} - -// --------------------------------------------------------------------------- -// supplyDeltaFromEvent -// --------------------------------------------------------------------------- - -func TestSupplyDeltaFromEvent_Mint(t *testing.T) { - _, _, delta, ok := supplyDeltaFromEvent(mintEvent()) - require.True(t, ok) - assert.Equal(t, testAmount, delta) -} - -func TestSupplyDeltaFromEvent_Burn(t *testing.T) { - _, _, delta, ok := supplyDeltaFromEvent(burnEvent()) - require.True(t, ok) - assert.Equal(t, "-"+testAmount, delta) -} - -func TestSupplyDeltaFromEvent_Transfer_NoOp(t *testing.T) { - instrumentAdmin, instrumentID, delta, ok := supplyDeltaFromEvent(transferEventParsed()) - assert.Empty(t, instrumentAdmin) - assert.Empty(t, instrumentID) - assert.Empty(t, delta) - assert.False(t, ok) -} - -// --------------------------------------------------------------------------- -// balanceUpdatesFromEvent -// --------------------------------------------------------------------------- - -func TestBalanceUpdatesFromEvent_Mint(t *testing.T) { - updates := balanceUpdatesFromEvent(mintEvent()) - require.Len(t, updates, 1) - assert.Equal(t, testRecipient, updates[0][0]) - assert.Equal(t, testAmount, updates[0][1]) -} - -func TestBalanceUpdatesFromEvent_Burn(t *testing.T) { - updates := balanceUpdatesFromEvent(burnEvent()) - require.Len(t, updates, 1) - assert.Equal(t, testSender, updates[0][0]) - assert.Equal(t, "-"+testAmount, updates[0][1]) -} - -func TestBalanceUpdatesFromEvent_Transfer(t *testing.T) { - updates := balanceUpdatesFromEvent(transferEventParsed()) - require.Len(t, updates, 2) - assert.Equal(t, testSender, updates[0][0]) - assert.Equal(t, "-"+testAmount, updates[0][1]) - assert.Equal(t, testRecipient, updates[1][0]) - assert.Equal(t, testAmount, updates[1][1]) -} - -// --------------------------------------------------------------------------- -// Processor.Run: startup / 
lifecycle +// Lifecycle // --------------------------------------------------------------------------- func TestProcessor_Run_LoadOffsetError(t *testing.T) { @@ -173,9 +114,7 @@ func TestProcessor_Run_LoadOffsetError(t *testing.T) { store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), loadErr) - p := NewProcessor(fetcher, store, zap.NewNop()) - err := p.Run(context.Background()) - + err := engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background()) require.Error(t, err) assert.ErrorIs(t, err, loadErr) } @@ -188,8 +127,7 @@ func TestProcessor_Run_StreamClosed_ReturnsNil(t *testing.T) { fetcher.EXPECT().Start(mock.Anything, int64(5)) fetcher.EXPECT().Events().Return(feedCh()) - p := NewProcessor(fetcher, store, zap.NewNop()) - assert.NoError(t, p.Run(context.Background())) + assert.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } func TestProcessor_Run_ContextCancelled(t *testing.T) { @@ -197,23 +135,22 @@ func TestProcessor_Run_ContextCancelled(t *testing.T) { store := mocks.NewStore(t) fetcher := mocks.NewEventFetcher(t) - ch := make(chan *streaming.Batch[*indexer.ParsedEvent]) // never closed / sent + ch := make(chan *streaming.Batch[*indexer.ParsedEvent]) store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) fetcher.EXPECT().Start(mock.Anything, int64(0)) fetcher.EXPECT().Events().Return((<-chan *streaming.Batch[*indexer.ParsedEvent])(ch)) - p := NewProcessor(fetcher, store, zap.NewNop()) - done := make(chan error, 1) - go func() { done <- p.Run(ctx) }() + go func() { done <- engine.NewProcessor(fetcher, store, zap.NewNop()).Run(ctx) }() cancel() assert.ErrorIs(t, <-done, context.Canceled) } // --------------------------------------------------------------------------- -// Processor.Run: per-event-type store call verification +// Event-type store call verification +// (also implicitly tests tokenFromEvent / supplyDeltaFromEvent / balanceUpdatesFromEvent) // 
--------------------------------------------------------------------------- func TestProcessor_Run_MintBatch(t *testing.T) { @@ -223,15 +160,21 @@ func TestProcessor_Run_MintBatch(t *testing.T) { store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) fetcher.EXPECT().Start(mock.Anything, int64(0)) - fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(1, ev))) + fetcher.EXPECT().Events().Return(feedCh(makeBatch(1, ev))) setupRunInTx(store) - store.EXPECT().UpsertToken(mock.Anything, tokenFromEvent(ev)).Return(nil) + store.EXPECT().UpsertToken(mock.Anything, &indexer.Token{ + InstrumentAdmin: testInstrumentAdmin, + InstrumentID: testInstrumentID, + Issuer: testIssuer, + FirstSeenOffset: 1, + FirstSeenAt: time.Unix(1_700_000_000, 0), + }).Return(nil) store.EXPECT().ApplySupplyDelta(mock.Anything, testInstrumentAdmin, testInstrumentID, testAmount).Return(nil) store.EXPECT().ApplyBalanceDelta(mock.Anything, testRecipient, testInstrumentAdmin, testInstrumentID, testAmount).Return(nil) store.EXPECT().SaveBatch(mock.Anything, int64(1), []*indexer.ParsedEvent{ev}).Return(nil) - require.NoError(t, NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) + require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } func TestProcessor_Run_BurnBatch(t *testing.T) { @@ -241,34 +184,46 @@ func TestProcessor_Run_BurnBatch(t *testing.T) { store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) fetcher.EXPECT().Start(mock.Anything, int64(0)) - fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(2, ev))) + fetcher.EXPECT().Events().Return(feedCh(makeBatch(2, ev))) setupRunInTx(store) - store.EXPECT().UpsertToken(mock.Anything, tokenFromEvent(ev)).Return(nil) + store.EXPECT().UpsertToken(mock.Anything, &indexer.Token{ + InstrumentAdmin: testInstrumentAdmin, + InstrumentID: testInstrumentID, + Issuer: testIssuer, + FirstSeenOffset: 2, + FirstSeenAt: time.Unix(1_700_000_000, 0), + }).Return(nil) 
store.EXPECT().ApplySupplyDelta(mock.Anything, testInstrumentAdmin, testInstrumentID, "-"+testAmount).Return(nil) store.EXPECT().ApplyBalanceDelta(mock.Anything, testSender, testInstrumentAdmin, testInstrumentID, "-"+testAmount).Return(nil) store.EXPECT().SaveBatch(mock.Anything, int64(2), []*indexer.ParsedEvent{ev}).Return(nil) - require.NoError(t, NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) + require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } func TestProcessor_Run_TransferBatch(t *testing.T) { store := mocks.NewStore(t) fetcher := mocks.NewEventFetcher(t) - ev := transferEventParsed() + ev := transferEvent() store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) fetcher.EXPECT().Start(mock.Anything, int64(0)) - fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(3, ev))) + fetcher.EXPECT().Events().Return(feedCh(makeBatch(3, ev))) setupRunInTx(store) - store.EXPECT().UpsertToken(mock.Anything, tokenFromEvent(ev)).Return(nil) - // Transfer: no supply delta. + store.EXPECT().UpsertToken(mock.Anything, &indexer.Token{ + InstrumentAdmin: testInstrumentAdmin, + InstrumentID: testInstrumentID, + Issuer: testIssuer, + FirstSeenOffset: 3, + FirstSeenAt: time.Unix(1_700_000_000, 0), + }).Return(nil) + // Transfer: no ApplySupplyDelta. 
store.EXPECT().ApplyBalanceDelta(mock.Anything, testSender, testInstrumentAdmin, testInstrumentID, "-"+testAmount).Return(nil) store.EXPECT().ApplyBalanceDelta(mock.Anything, testRecipient, testInstrumentAdmin, testInstrumentID, testAmount).Return(nil) store.EXPECT().SaveBatch(mock.Anything, int64(3), []*indexer.ParsedEvent{ev}).Return(nil) - require.NoError(t, NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) + require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } func TestProcessor_Run_EmptyBatch_AdvancesOffset(t *testing.T) { @@ -277,46 +232,41 @@ func TestProcessor_Run_EmptyBatch_AdvancesOffset(t *testing.T) { store.EXPECT().LatestOffset(mock.Anything).Return(int64(9), nil) fetcher.EXPECT().Start(mock.Anything, int64(9)) - fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(10))) + fetcher.EXPECT().Events().Return(feedCh(makeBatch(10))) setupRunInTx(store) - // No UpsertToken, ApplySupplyDelta, or ApplyBalanceDelta calls. 
store.EXPECT().SaveBatch(mock.Anything, int64(10), ([]*indexer.ParsedEvent)(nil)).Return(nil) - require.NoError(t, NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) + require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } // --------------------------------------------------------------------------- -// Processor.Run: retry on transient store error +// Retry behaviour // --------------------------------------------------------------------------- -func TestProcessor_Run_ProcessBatch_StoreError_Retries(t *testing.T) { - processorRetryBaseDelay = time.Millisecond - defer func() { processorRetryBaseDelay = 5 * time.Second }() +func TestProcessor_Run_StoreError_Retries(t *testing.T) { + engine.SetRetryBaseDelay(t, time.Millisecond) store := mocks.NewStore(t) fetcher := mocks.NewEventFetcher(t) store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) fetcher.EXPECT().Start(mock.Anything, int64(0)) - fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(1))) + fetcher.EXPECT().Events().Return(feedCh(makeBatch(1))) - // First attempt fails. store.EXPECT().RunInTx(mock.Anything, mock.Anything). - Return(errors.New("transient db error")).Once() - // Second attempt succeeds. + Return(errors.New("transient error")).Once() store.EXPECT().RunInTx(mock.Anything, mock.Anything). 
- RunAndReturn(func(ctx context.Context, fn func(context.Context, indexer.Store) error) error { + RunAndReturn(func(ctx context.Context, fn func(context.Context, engine.Store) error) error { return fn(ctx, store) }).Once() store.EXPECT().SaveBatch(mock.Anything, int64(1), ([]*indexer.ParsedEvent)(nil)).Return(nil) - require.NoError(t, NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) + require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } func TestProcessor_Run_ContextCancelledDuringRetry(t *testing.T) { - processorRetryBaseDelay = time.Hour // effectively infinite - defer func() { processorRetryBaseDelay = 5 * time.Second }() + engine.SetRetryBaseDelay(t, time.Hour) ctx, cancel := context.WithCancel(context.Background()) store := mocks.NewStore(t) @@ -324,15 +274,14 @@ func TestProcessor_Run_ContextCancelledDuringRetry(t *testing.T) { store.EXPECT().LatestOffset(mock.Anything).Return(int64(0), nil) fetcher.EXPECT().Start(mock.Anything, int64(0)) - fetcher.EXPECT().Events().Return(feedCh(makeProcBatch(1))) + fetcher.EXPECT().Events().Return(feedCh(makeBatch(1))) - // Always fail; cancel immediately so the retry wait is interrupted. store.EXPECT().RunInTx(mock.Anything, mock.Anything). - RunAndReturn(func(_ context.Context, _ func(context.Context, indexer.Store) error) error { + RunAndReturn(func(_ context.Context, _ func(context.Context, engine.Store) error) error { cancel() - return errors.New("persistent db error") + return errors.New("persistent error") }) - err := NewProcessor(fetcher, store, zap.NewNop()).Run(ctx) + err := engine.NewProcessor(fetcher, store, zap.NewNop()).Run(ctx) assert.ErrorIs(t, err, context.Canceled) } diff --git a/pkg/indexer/store.go b/pkg/indexer/store.go deleted file mode 100644 index aa246644..00000000 --- a/pkg/indexer/store.go +++ /dev/null @@ -1,47 +0,0 @@ -package indexer - -import "context" - -// Store defines the persistence contract for the indexer Processor. 
-// -// The key invariant: offset and events from the same LedgerTransaction must be -// written atomically. This guarantees that after a restart the processor resumes -// from a consistent point — no event is lost and no event is double-written. -// -// The Bun-backed implementation lives in pkg/indexer/store. -// -//go:generate mockery --name Store --output engine/mocks --outpkg mocks --filename mock_store.go --with-expecter -type Store interface { - // LatestOffset returns the last successfully persisted ledger offset. - // Returns 0 and no error when no offset has been stored yet (fresh start). - // Called once at startup, outside any transaction. - LatestOffset(ctx context.Context) (int64, error) - - // RunInTx executes fn inside a single database transaction. - // On success fn's return value is nil and the transaction is committed. - // On any error the transaction is rolled back and the error is returned. - // The Store passed to fn is scoped to the transaction — all methods on it - // participate in the same underlying DB transaction. - RunInTx(ctx context.Context, fn func(ctx context.Context, tx Store) error) error - - // SaveBatch persists a batch of ParsedEvents and advances the stored ledger offset. - // Duplicate events (same ContractID) are silently skipped via ON CONFLICT DO NOTHING. - // When events is empty the offset is still advanced to skip no-op transactions on restart. - SaveBatch(ctx context.Context, offset int64, events []*ParsedEvent) error - - // UpsertToken records a token deployment on first observation. - // Subsequent calls for the same {InstrumentAdmin, InstrumentID} are no-ops - // (ON CONFLICT DO NOTHING). - UpsertToken(ctx context.Context, token *Token) error - - // ApplyBalanceDelta adjusts a party's token balance by delta (signed decimal string). - // The balance row is created at zero if it does not yet exist, then delta is added. 
- // The store must also update Token.HolderCount atomically: - // - increment when a party's balance transitions from zero to positive - // - decrement when a party's balance transitions from positive to zero - ApplyBalanceDelta(ctx context.Context, partyID, instrumentAdmin, instrumentID, delta string) error - - // ApplySupplyDelta adjusts a token's TotalSupply by delta (signed decimal string). - // Called once per mint (+amount) or burn (-amount). Transfer events must not call this. - ApplySupplyDelta(ctx context.Context, instrumentAdmin, instrumentID, delta string) error -} diff --git a/pkg/indexer/types.go b/pkg/indexer/types.go index 5372fee7..33ac187b 100644 --- a/pkg/indexer/types.go +++ b/pkg/indexer/types.go @@ -108,9 +108,8 @@ type Token struct { // (ERC-20: the per-address entry in the balances mapping, i.e. balanceOf(address).) // // Amount is a non-negative decimal string representing the live balance, -// e.g. "1500.000000000000000000". It is computed by the processor from the -// prior balance plus the event amount and stored as a snapshot — no delta -// arithmetic is performed in the database. +// e.g. "1500.000000000000000000". Updated by the store via delta arithmetic +// (Store.ApplyBalanceDelta) — the store adds the signed delta to the persisted value. 
type Balance struct { PartyID string // canton party (ERC-20: address) InstrumentAdmin string // instrumentId.admin From 64bfecb1e8d0594d01ad88149a14034d3a162018 Mon Sep 17 00:00:00 2001 From: sadiq1971 Date: Wed, 18 Mar 2026 19:10:27 +0600 Subject: [PATCH 4/5] store modified --- pkg/cantonsdk/values/decode.go | 60 ++++++++++ .../engine/mocks/mock_event_fetcher.go | 11 +- pkg/indexer/engine/mocks/mock_store.go | 112 ++++++++++++++++-- pkg/indexer/engine/processor.go | 33 ++++-- pkg/indexer/engine/processor_test.go | 31 ++++- 5 files changed, 218 insertions(+), 29 deletions(-) diff --git a/pkg/cantonsdk/values/decode.go b/pkg/cantonsdk/values/decode.go index bf062fc5..3844186e 100644 --- a/pkg/cantonsdk/values/decode.go +++ b/pkg/cantonsdk/values/decode.go @@ -17,6 +17,18 @@ func Text(v *lapiv2.Value) string { return "" } +// TextOK extracts a text value and reports whether the type matched. +func TextOK(v *lapiv2.Value) (string, bool) { + if v == nil { + return "", false + } + t, ok := v.Sum.(*lapiv2.Value_Text) + if !ok { + return "", false + } + return t.Text, true +} + // Party extracts a party value. func Party(v *lapiv2.Value) string { if v == nil { @@ -28,6 +40,18 @@ func Party(v *lapiv2.Value) string { return "" } +// PartyOK extracts a party value and reports whether the type matched. +func PartyOK(v *lapiv2.Value) (string, bool) { + if v == nil { + return "", false + } + p, ok := v.Sum.(*lapiv2.Value_Party) + if !ok { + return "", false + } + return p.Party, true +} + // PartyList extracts list of parties. func PartyList(v *lapiv2.Value) []string { if v == nil { @@ -56,6 +80,18 @@ func Numeric(v *lapiv2.Value) string { return "0" } +// NumericOK extracts a numeric value and reports whether the type matched. +func NumericOK(v *lapiv2.Value) (string, bool) { + if v == nil { + return "", false + } + n, ok := v.Sum.(*lapiv2.Value_Numeric) + if !ok { + return "", false + } + return n.Numeric, true +} + // ContractID extracts a contract ID value. 
func ContractID(v *lapiv2.Value) string { if v == nil { @@ -112,6 +148,18 @@ func Timestamp(v *lapiv2.Value) time.Time { return time.Time{} } +// TimestampOK extracts a timestamp and reports whether the type matched. +func TimestampOK(v *lapiv2.Value) (time.Time, bool) { + if v == nil { + return time.Time{}, false + } + t, ok := v.Sum.(*lapiv2.Value_Timestamp) + if !ok { + return time.Time{}, false + } + return time.UnixMicro(t.Timestamp), true +} + // RecordField extracts a named field from a Record value, returning the sub-map. // Returns nil when v is nil or not a Record. func RecordField(v *lapiv2.Value) map[string]*lapiv2.Value { @@ -132,6 +180,12 @@ func NestedTextField(v *lapiv2.Value, field string) string { return Text(RecordField(v)[field]) } +// NestedTextFieldOK accesses a Text field within a nested DAML Record value and +// reports whether the lookup succeeded with the right type. +func NestedTextFieldOK(v *lapiv2.Value, field string) (string, bool) { + return TextOK(RecordField(v)[field]) +} + // NestedPartyField accesses a Party field within a nested DAML Record value. // Use this for fields like instrumentId.admin. // Returns "" when v is nil, not a Record, or the field is absent. @@ -139,6 +193,12 @@ func NestedPartyField(v *lapiv2.Value, field string) string { return Party(RecordField(v)[field]) } +// NestedPartyFieldOK accesses a Party field within a nested DAML Record value and +// reports whether the lookup succeeded with the right type. +func NestedPartyFieldOK(v *lapiv2.Value, field string) (string, bool) { + return PartyOK(RecordField(v)[field]) +} + // OptionalRecordFields extracts the inner Record fields from an Optional(Record) value. // Returns nil when v is None or the inner value is not a Record. 
func OptionalRecordFields(v *lapiv2.Value) map[string]*lapiv2.Value { diff --git a/pkg/indexer/engine/mocks/mock_event_fetcher.go b/pkg/indexer/engine/mocks/mock_event_fetcher.go index ef0d08b3..76757f06 100644 --- a/pkg/indexer/engine/mocks/mock_event_fetcher.go +++ b/pkg/indexer/engine/mocks/mock_event_fetcher.go @@ -45,10 +45,12 @@ func (_m *EventFetcher) Events() <-chan *streaming.Batch[*indexer.ParsedEvent] { return r0 } +// EventFetcher_Events_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Events' type EventFetcher_Events_Call struct { *mock.Call } +// Events is a helper method to define mock.On call func (_e *EventFetcher_Expecter) Events() *EventFetcher_Events_Call { return &EventFetcher_Events_Call{Call: _e.mock.On("Events")} } @@ -75,10 +77,14 @@ func (_m *EventFetcher) Start(ctx context.Context, offset int64) { _m.Called(ctx, offset) } +// EventFetcher_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' type EventFetcher_Start_Call struct { *mock.Call } +// Start is a helper method to define mock.On call +// - ctx context.Context +// - offset int64 func (_e *EventFetcher_Expecter) Start(ctx interface{}, offset interface{}) *EventFetcher_Start_Call { return &EventFetcher_Start_Call{Call: _e.mock.On("Start", ctx, offset)} } @@ -96,13 +102,12 @@ func (_c *EventFetcher_Start_Call) Return() *EventFetcher_Start_Call { } func (_c *EventFetcher_Start_Call) RunAndReturn(run func(context.Context, int64)) *EventFetcher_Start_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(int64)) - }) + _c.Run(run) return _c } // NewEventFetcher creates a new instance of EventFetcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
func NewEventFetcher(t interface { mock.TestingT Cleanup(func()) diff --git a/pkg/indexer/engine/mocks/mock_store.go b/pkg/indexer/engine/mocks/mock_store.go index e8a107cb..0fc43880 100644 --- a/pkg/indexer/engine/mocks/mock_store.go +++ b/pkg/indexer/engine/mocks/mock_store.go @@ -42,10 +42,17 @@ func (_m *Store) ApplyBalanceDelta(ctx context.Context, partyID string, instrume return r0 } +// Store_ApplyBalanceDelta_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ApplyBalanceDelta' type Store_ApplyBalanceDelta_Call struct { *mock.Call } +// ApplyBalanceDelta is a helper method to define mock.On call +// - ctx context.Context +// - partyID string +// - instrumentAdmin string +// - instrumentID string +// - delta string func (_e *Store_Expecter) ApplyBalanceDelta(ctx interface{}, partyID interface{}, instrumentAdmin interface{}, instrumentID interface{}, delta interface{}) *Store_ApplyBalanceDelta_Call { return &Store_ApplyBalanceDelta_Call{Call: _e.mock.On("ApplyBalanceDelta", ctx, partyID, instrumentAdmin, instrumentID, delta)} } @@ -85,10 +92,16 @@ func (_m *Store) ApplySupplyDelta(ctx context.Context, instrumentAdmin string, i return r0 } +// Store_ApplySupplyDelta_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ApplySupplyDelta' type Store_ApplySupplyDelta_Call struct { *mock.Call } +// ApplySupplyDelta is a helper method to define mock.On call +// - ctx context.Context +// - instrumentAdmin string +// - instrumentID string +// - delta string func (_e *Store_Expecter) ApplySupplyDelta(ctx interface{}, instrumentAdmin interface{}, instrumentID interface{}, delta interface{}) *Store_ApplySupplyDelta_Call { return &Store_ApplySupplyDelta_Call{Call: _e.mock.On("ApplySupplyDelta", ctx, instrumentAdmin, instrumentID, delta)} } @@ -110,6 +123,63 @@ func (_c *Store_ApplySupplyDelta_Call) RunAndReturn(run func(context.Context, st return _c } +// InsertEvent provides a mock function 
with given fields: ctx, event +func (_m *Store) InsertEvent(ctx context.Context, event *indexer.ParsedEvent) (bool, error) { + ret := _m.Called(ctx, event) + + if len(ret) == 0 { + panic("no return value specified for InsertEvent") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *indexer.ParsedEvent) (bool, error)); ok { + return rf(ctx, event) + } + if rf, ok := ret.Get(0).(func(context.Context, *indexer.ParsedEvent) bool); ok { + r0 = rf(ctx, event) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, *indexer.ParsedEvent) error); ok { + r1 = rf(ctx, event) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Store_InsertEvent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertEvent' +type Store_InsertEvent_Call struct { + *mock.Call +} + +// InsertEvent is a helper method to define mock.On call +// - ctx context.Context +// - event *indexer.ParsedEvent +func (_e *Store_Expecter) InsertEvent(ctx interface{}, event interface{}) *Store_InsertEvent_Call { + return &Store_InsertEvent_Call{Call: _e.mock.On("InsertEvent", ctx, event)} +} + +func (_c *Store_InsertEvent_Call) Run(run func(ctx context.Context, event *indexer.ParsedEvent)) *Store_InsertEvent_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*indexer.ParsedEvent)) + }) + return _c +} + +func (_c *Store_InsertEvent_Call) Return(inserted bool, err error) *Store_InsertEvent_Call { + _c.Call.Return(inserted, err) + return _c +} + +func (_c *Store_InsertEvent_Call) RunAndReturn(run func(context.Context, *indexer.ParsedEvent) (bool, error)) *Store_InsertEvent_Call { + _c.Call.Return(run) + return _c +} + // LatestOffset provides a mock function with given fields: ctx func (_m *Store) LatestOffset(ctx context.Context) (int64, error) { ret := _m.Called(ctx) @@ -138,10 +208,13 @@ func (_m *Store) LatestOffset(ctx context.Context) (int64, error) { 
return r0, r1 } +// Store_LatestOffset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestOffset' type Store_LatestOffset_Call struct { *mock.Call } +// LatestOffset is a helper method to define mock.On call +// - ctx context.Context func (_e *Store_Expecter) LatestOffset(ctx interface{}) *Store_LatestOffset_Call { return &Store_LatestOffset_Call{Call: _e.mock.On("LatestOffset", ctx)} } @@ -181,10 +254,14 @@ func (_m *Store) RunInTx(ctx context.Context, fn func(context.Context, engine.St return r0 } +// Store_RunInTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RunInTx' type Store_RunInTx_Call struct { *mock.Call } +// RunInTx is a helper method to define mock.On call +// - ctx context.Context +// - fn func(context.Context , engine.Store) error func (_e *Store_Expecter) RunInTx(ctx interface{}, fn interface{}) *Store_RunInTx_Call { return &Store_RunInTx_Call{Call: _e.mock.On("RunInTx", ctx, fn)} } @@ -206,17 +283,17 @@ func (_c *Store_RunInTx_Call) RunAndReturn(run func(context.Context, func(contex return _c } -// SaveBatch provides a mock function with given fields: ctx, offset, events -func (_m *Store) SaveBatch(ctx context.Context, offset int64, events []*indexer.ParsedEvent) error { - ret := _m.Called(ctx, offset, events) +// SaveOffset provides a mock function with given fields: ctx, offset +func (_m *Store) SaveOffset(ctx context.Context, offset int64) error { + ret := _m.Called(ctx, offset) if len(ret) == 0 { - panic("no return value specified for SaveBatch") + panic("no return value specified for SaveOffset") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, int64, []*indexer.ParsedEvent) error); ok { - r0 = rf(ctx, offset, events) + if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { + r0 = rf(ctx, offset) } else { r0 = ret.Error(0) } @@ -224,27 +301,31 @@ func (_m *Store) SaveBatch(ctx context.Context, offset int64, events []*indexer. 
return r0 } -type Store_SaveBatch_Call struct { +// Store_SaveOffset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveOffset' +type Store_SaveOffset_Call struct { *mock.Call } -func (_e *Store_Expecter) SaveBatch(ctx interface{}, offset interface{}, events interface{}) *Store_SaveBatch_Call { - return &Store_SaveBatch_Call{Call: _e.mock.On("SaveBatch", ctx, offset, events)} +// SaveOffset is a helper method to define mock.On call +// - ctx context.Context +// - offset int64 +func (_e *Store_Expecter) SaveOffset(ctx interface{}, offset interface{}) *Store_SaveOffset_Call { + return &Store_SaveOffset_Call{Call: _e.mock.On("SaveOffset", ctx, offset)} } -func (_c *Store_SaveBatch_Call) Run(run func(ctx context.Context, offset int64, events []*indexer.ParsedEvent)) *Store_SaveBatch_Call { +func (_c *Store_SaveOffset_Call) Run(run func(ctx context.Context, offset int64)) *Store_SaveOffset_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(int64), args[2].([]*indexer.ParsedEvent)) + run(args[0].(context.Context), args[1].(int64)) }) return _c } -func (_c *Store_SaveBatch_Call) Return(_a0 error) *Store_SaveBatch_Call { +func (_c *Store_SaveOffset_Call) Return(_a0 error) *Store_SaveOffset_Call { _c.Call.Return(_a0) return _c } -func (_c *Store_SaveBatch_Call) RunAndReturn(run func(context.Context, int64, []*indexer.ParsedEvent) error) *Store_SaveBatch_Call { +func (_c *Store_SaveOffset_Call) RunAndReturn(run func(context.Context, int64) error) *Store_SaveOffset_Call { _c.Call.Return(run) return _c } @@ -267,10 +348,14 @@ func (_m *Store) UpsertToken(ctx context.Context, token *indexer.Token) error { return r0 } +// Store_UpsertToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpsertToken' type Store_UpsertToken_Call struct { *mock.Call } +// UpsertToken is a helper method to define mock.On call +// - ctx context.Context +// - token 
*indexer.Token func (_e *Store_Expecter) UpsertToken(ctx interface{}, token interface{}) *Store_UpsertToken_Call { return &Store_UpsertToken_Call{Call: _e.mock.On("UpsertToken", ctx, token)} } @@ -293,6 +378,7 @@ func (_c *Store_UpsertToken_Call) RunAndReturn(run func(context.Context, *indexe } // NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewStore(t interface { mock.TestingT Cleanup(func()) diff --git a/pkg/indexer/engine/processor.go b/pkg/indexer/engine/processor.go index 8a987e3c..ee764935 100644 --- a/pkg/indexer/engine/processor.go +++ b/pkg/indexer/engine/processor.go @@ -49,10 +49,15 @@ type Store interface { // participate in the same underlying DB transaction. RunInTx(ctx context.Context, fn func(ctx context.Context, tx Store) error) error - // SaveBatch persists a batch of ParsedEvents and advances the stored ledger offset. - // Duplicate events (same ContractID) are silently skipped via ON CONFLICT DO NOTHING. - // When events is empty the offset is still advanced to skip no-op transactions on restart. - SaveBatch(ctx context.Context, offset int64, events []*indexer.ParsedEvent) error + // InsertEvent persists one ParsedEvent by ContractID. + // Returns inserted=false when the event already exists and should therefore not + // mutate any derived state a second time. + InsertEvent(ctx context.Context, event *indexer.ParsedEvent) (inserted bool, err error) + + // SaveOffset advances the stored ledger offset after all newly inserted events in + // the transaction have updated derived state. It must be safe to call even when the + // batch was empty or every event was already present. + SaveOffset(ctx context.Context, offset int64) error // UpsertToken records a token deployment on first observation. 
// Subsequent calls for the same {InstrumentAdmin, InstrumentID} are no-ops @@ -156,11 +161,21 @@ func (p *Processor) processBatchWithRetry(ctx context.Context, batch *streaming. } // processBatch persists a single decoded batch inside a single database transaction. -// All writes — token upserts, supply/balance deltas, events, and offset advance — are -// committed atomically. On any error the transaction is rolled back and the caller retries. +// Each event is inserted before its derived state is mutated so replayed transactions +// can skip already-indexed events without double-applying balances or supply. +// All writes — event inserts, token upserts, supply/balance deltas, and offset advance — +// are committed atomically. On any error the transaction is rolled back and the caller retries. func (p *Processor) processBatch(ctx context.Context, batch *streaming.Batch[*indexer.ParsedEvent]) error { err := p.store.RunInTx(ctx, func(ctx context.Context, tx Store) error { for _, e := range batch.Items { + inserted, err := tx.InsertEvent(ctx, e) + if err != nil { + return fmt.Errorf("insert event: %w", err) + } + if !inserted { + continue + } + if err := tx.UpsertToken(ctx, tokenFromEvent(e)); err != nil { return fmt.Errorf("upsert token: %w", err) } @@ -178,7 +193,11 @@ func (p *Processor) processBatch(ctx context.Context, batch *streaming.Batch[*in } } - return tx.SaveBatch(ctx, batch.Offset, batch.Items) + if err := tx.SaveOffset(ctx, batch.Offset); err != nil { + return fmt.Errorf("save offset: %w", err) + } + + return nil }) if err != nil { return fmt.Errorf("tx at offset %d: %w", batch.Offset, err) diff --git a/pkg/indexer/engine/processor_test.go b/pkg/indexer/engine/processor_test.go index d79b9406..6d4af7e7 100644 --- a/pkg/indexer/engine/processor_test.go +++ b/pkg/indexer/engine/processor_test.go @@ -163,6 +163,7 @@ func TestProcessor_Run_MintBatch(t *testing.T) { fetcher.EXPECT().Events().Return(feedCh(makeBatch(1, ev))) setupRunInTx(store) + 
store.EXPECT().InsertEvent(mock.Anything, ev).Return(true, nil) store.EXPECT().UpsertToken(mock.Anything, &indexer.Token{ InstrumentAdmin: testInstrumentAdmin, InstrumentID: testInstrumentID, @@ -172,7 +173,7 @@ func TestProcessor_Run_MintBatch(t *testing.T) { }).Return(nil) store.EXPECT().ApplySupplyDelta(mock.Anything, testInstrumentAdmin, testInstrumentID, testAmount).Return(nil) store.EXPECT().ApplyBalanceDelta(mock.Anything, testRecipient, testInstrumentAdmin, testInstrumentID, testAmount).Return(nil) - store.EXPECT().SaveBatch(mock.Anything, int64(1), []*indexer.ParsedEvent{ev}).Return(nil) + store.EXPECT().SaveOffset(mock.Anything, int64(1)).Return(nil) require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } @@ -187,6 +188,7 @@ func TestProcessor_Run_BurnBatch(t *testing.T) { fetcher.EXPECT().Events().Return(feedCh(makeBatch(2, ev))) setupRunInTx(store) + store.EXPECT().InsertEvent(mock.Anything, ev).Return(true, nil) store.EXPECT().UpsertToken(mock.Anything, &indexer.Token{ InstrumentAdmin: testInstrumentAdmin, InstrumentID: testInstrumentID, @@ -196,7 +198,7 @@ func TestProcessor_Run_BurnBatch(t *testing.T) { }).Return(nil) store.EXPECT().ApplySupplyDelta(mock.Anything, testInstrumentAdmin, testInstrumentID, "-"+testAmount).Return(nil) store.EXPECT().ApplyBalanceDelta(mock.Anything, testSender, testInstrumentAdmin, testInstrumentID, "-"+testAmount).Return(nil) - store.EXPECT().SaveBatch(mock.Anything, int64(2), []*indexer.ParsedEvent{ev}).Return(nil) + store.EXPECT().SaveOffset(mock.Anything, int64(2)).Return(nil) require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } @@ -211,6 +213,7 @@ func TestProcessor_Run_TransferBatch(t *testing.T) { fetcher.EXPECT().Events().Return(feedCh(makeBatch(3, ev))) setupRunInTx(store) + store.EXPECT().InsertEvent(mock.Anything, ev).Return(true, nil) store.EXPECT().UpsertToken(mock.Anything, &indexer.Token{ InstrumentAdmin: 
testInstrumentAdmin, InstrumentID: testInstrumentID, @@ -221,7 +224,7 @@ func TestProcessor_Run_TransferBatch(t *testing.T) { // Transfer: no ApplySupplyDelta. store.EXPECT().ApplyBalanceDelta(mock.Anything, testSender, testInstrumentAdmin, testInstrumentID, "-"+testAmount).Return(nil) store.EXPECT().ApplyBalanceDelta(mock.Anything, testRecipient, testInstrumentAdmin, testInstrumentID, testAmount).Return(nil) - store.EXPECT().SaveBatch(mock.Anything, int64(3), []*indexer.ParsedEvent{ev}).Return(nil) + store.EXPECT().SaveOffset(mock.Anything, int64(3)).Return(nil) require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } @@ -235,13 +238,29 @@ func TestProcessor_Run_EmptyBatch_AdvancesOffset(t *testing.T) { fetcher.EXPECT().Events().Return(feedCh(makeBatch(10))) setupRunInTx(store) - store.EXPECT().SaveBatch(mock.Anything, int64(10), ([]*indexer.ParsedEvent)(nil)).Return(nil) + store.EXPECT().SaveOffset(mock.Anything, int64(10)).Return(nil) + + require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) +} + +func TestProcessor_Run_DuplicateEvent_SkipsDerivedStateButAdvancesOffset(t *testing.T) { + store := mocks.NewStore(t) + fetcher := mocks.NewEventFetcher(t) + ev := mintEvent() + + store.EXPECT().LatestOffset(mock.Anything).Return(int64(4), nil) + fetcher.EXPECT().Start(mock.Anything, int64(4)) + fetcher.EXPECT().Events().Return(feedCh(makeBatch(5, ev))) + + setupRunInTx(store) + store.EXPECT().InsertEvent(mock.Anything, ev).Return(false, nil) + store.EXPECT().SaveOffset(mock.Anything, int64(5)).Return(nil) require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } // --------------------------------------------------------------------------- -// Retry behaviour +// Retry behavior // --------------------------------------------------------------------------- func TestProcessor_Run_StoreError_Retries(t *testing.T) { @@ -260,7 +279,7 @@ func 
TestProcessor_Run_StoreError_Retries(t *testing.T) { RunAndReturn(func(ctx context.Context, fn func(context.Context, engine.Store) error) error { return fn(ctx, store) }).Once() - store.EXPECT().SaveBatch(mock.Anything, int64(1), ([]*indexer.ParsedEvent)(nil)).Return(nil) + store.EXPECT().SaveOffset(mock.Anything, int64(1)).Return(nil) require.NoError(t, engine.NewProcessor(fetcher, store, zap.NewNop()).Run(context.Background())) } From ba59defab9f97ae50c0ae111167dc7adcc73635f Mon Sep 17 00:00:00 2001 From: sadiq1971 Date: Wed, 18 Mar 2026 19:12:50 +0600 Subject: [PATCH 5/5] removed doc --- indexer-design.md | 1699 --------------------------------------------- 1 file changed, 1699 deletions(-) delete mode 100644 indexer-design.md diff --git a/indexer-design.md b/indexer-design.md deleted file mode 100644 index 07b9d44c..00000000 --- a/indexer-design.md +++ /dev/null @@ -1,1699 +0,0 @@ -# Canton ERC-20 Indexer — Design Document - -> **Status:** Design / Pre-Implementation -> **CIP Reference:** CIP-0086 (ERC-20 Middleware & Distributed Indexer) -> **Scope:** Phase 1 — CIP-56 token indexer (DEMO + PROMPT); no Canton Coin yet - ---- - -## Table of Contents - -1. [Background & Motivation](#1-background--motivation) -2. [Current State & Gaps](#2-current-state--gaps) -3. [Key Design Questions Answered](#3-key-design-questions-answered) -4. [Architecture Overview](#4-architecture-overview) -5. [DAML Contract Change — Unified `TokenTransferEvent`](#5-daml-contract-change--unified-tokentransferevent) -6. [Component Deep-Dive](#6-component-deep-dive) - - 6.1 [cantonsdk Streaming Client](#61-cantonsdk-streaming-client-new-package) - - 6.2 [Fetcher](#62-fetcher) - - 6.3 [Parser](#63-parser) - - 6.4 [Processor](#64-processor) - - 6.5 [Store — Models & PostgreSQL](#65-store--models--postgresql) - - 6.6 [Database Migrations (Go code)](#66-database-migrations-go-code) - - 6.7 [Service Layer](#67-service-layer) - - 6.8 [API / HTTP Layer](#68-api--http-layer) -7. 
[File & Directory Layout](#7-file--directory-layout) -8. [Pseudo-code & Data Flows](#8-pseudo-code--data-flows) -9. [Configuration](#9-configuration) -10. [Integration with API Server](#10-integration-with-api-server) -11. [Open Questions & Future Work](#11-open-questions--future-work) - ---- - -## 1. Background & Motivation - -CIP-0086 mandates a **distributed indexer** that aggregates Canton token state and exposes -ERC-20-compatible HTTP endpoints. The current `reconciler` in `pkg/reconciler/` is a -periodic polling loop (snapshot-based) that: - -- Queries all `CIP56Holding` active contracts every N seconds -- Aggregates per-party balances — only current state, no history -- Tracks only bridge (mint/burn) events via `bridge_events` table -- Misses transfers made directly on Canton (visible only via holdings snapshot) - -**What the reconciler lacks:** -- Continuous streaming — balance lag between polls, events missed -- Transfer event history — can't answer "show me all transfers for party X" -- Resumability — replays from scratch on restart -- Independent HTTP query API -- Scalability — hard-coded to DEMO/PROMPT package IDs - -The indexer is a **separate, independent binary** (`cmd/indexer`) with no dependency on -the api-server's user table or user registration flow. It is Canton-native: it speaks -`canton_party_id` as its primary identity. EVM address mapping is the api-server's -responsibility, not the indexer's. - ---- - -## 2. 
Current State & Gaps - -``` -Current Architecture (reconciler, inside api-server process): - - StartPeriodicReconciliation(interval) - │ - ▼ every N seconds - ReconcileAll() - ├── GetAllHoldings() → StateService.GetActiveContracts() (snapshot) - ├── SetBalanceByCantonPartyID() - ├── SetTotalSupply() - ├── GetMintEvents() → active contract query (no streaming) - └── GetBurnEvents() → "Transfers are internal Canton operations, not tracked" - - PostgreSQL: user_token_balances, bridge_events, token_metrics - -Gaps: - ✗ No transfer history — only current balance - ✗ Balance lag between reconcile intervals - ✗ Not resumable (no ledger offset checkpoint) - ✗ No independent HTTP query API - ✗ Restarts replay from offset 0 - ✗ Coupled to api-server process and userstore -``` - ---- - -## 3. Key Design Questions Answered - -### Q1: Use / extend cantonsdk for the fetcher? - -**Yes — add `pkg/cantonsdk/streaming/` as a new generic streaming package.** - -The existing `pkg/cantonsdk/bridge/client.go` already uses `UpdateService.GetUpdates` -(gRPC server-streaming) inside `StreamWithdrawalEvents`, with exponential backoff -reconnect, auth token invalidation on 401, and offset resumption. The new package -formalises this pattern as a reusable, generic streaming client. The indexer fetcher -delegates entirely to it. - -**WebSocket note:** Canton's gRPC API does not support WebSocket. The Canton→indexer -connection is always gRPC HTTP/2 server-streaming. WebSocket is a Phase 2 option for the -indexer→client direction (real-time event subscriptions). - -### Q2: Add `TransferEvent`? Use a unified event for all cases? 
- -**Yes — add a single `TokenTransferEvent` DAML template covering MINT, BURN, TRANSFER.** - -This mirrors ERC-20's `Transfer(address indexed from, address indexed to, uint256 value)`: - -- **MINT**: `fromParty = None` -- **BURN**: `toParty = None` -- **TRANSFER**: both set - -The indexer subscribes to **only this one template** — no inference heuristics, no holding -lifecycle correlation. Clean, deterministic, ERC-20 idiomatic. - -**Does it violate Canton privacy?** No. The observer pattern is: -``` -signatory issuer ← indexer runs as issuer, sees all events -observer fromParty, toParty, auditObservers -``` -Identical to the existing `MintEvent` / `BurnEvent` pattern. Parties only see events they -are party to. The indexer (as issuer) has full visibility — same as the current -reconciler. Existing `MintEvent` / `BurnEvent` are kept for backward compatibility. - -**No return type changes needed.** In DAML, `create` inside a choice is a side-effect; -the new event is emitted without touching existing choice signatures. - -### Q3: Why does the indexer NOT depend on userstore? - -**The indexer is a Canton-native service. Its primary identity is `canton_party_id`.** - -The EVM address → party_id mapping is a concern of the api-server, not the indexer. -Coupling the indexer to `userstore` would: -- Make it non-deployable independently (always needs api-server's DB schema) -- Break the separation of concerns (indexer = ledger aggregator, not user registry) -- Prevent it from serving non-EVM Canton parties in the future - -**How callers query the indexer without userstore:** - -| Caller | Flow | -|--------|------| -| **api-server** | Resolves EVM address → `canton_party_id` via its own userstore, then calls indexer with a JWT whose claims contain `canton_party_id`. The indexer never sees the EVM address. | -| **Direct client** (wallet, dApp) | Client sends a JWT issued by the api-server (or auth server) that contains `canton_party_id`. 
Indexer validates JWT, extracts `canton_party_id`, scopes query. | -| **Public queries** | `totalSupply`, token metadata — no auth, no party resolution needed. | - -**The indexer's auth contract:** Validate JWT signature against the shared JWKS endpoint. -Extract `canton_party_id` from claims. Scope all queries to that party. Done. - -### Q4: Separate admin vs. user API? - -**No — two tiers: public and authenticated (by JWT party_id).** - -In ERC-20, `totalSupply()`, `name()`, `symbol()`, `decimals()` are public. The indexer -follows the same model. An admin tier can be added later if needed. - ---- - -## 4. Architecture Overview - -``` -┌───────────────────────────────────────────────────────────────────────────┐ -│ cmd/indexer (binary) │ -│ (entry → pkg/app/indexer/server.go) │ -│ │ -│ ┌──────────────────────────────────────────────────────────────────────┐ │ -│ │ pkg/cantonsdk/streaming (NEW — reusable across the project) │ │ -│ │ StreamingClient.Subscribe(templateIDs, fromOffset) │ │ -│ │ → UpdateService.GetUpdates (gRPC server-streaming) │ │ -│ │ → exponential backoff reconnect (mirrors StreamWithdrawalEvents) │ │ -│ └──────────────────────────────┬───────────────────────────────────────┘ │ -│ │ chan LedgerTransaction │ -│ ┌───────────────────────────────▼─────────────────────────────────────┐ │ -│ │ pkg/indexer/fetcher │ │ -│ │ loads checkpoint from DB → delegates to cantonsdk/streaming │ │ -│ └───────────────────────────────┬─────────────────────────────────────┘ │ -│ │ chan RawTransaction │ -│ ┌────────────────────────────────▼────────────────────────────────────┐ │ -│ │ pkg/indexer/parser │ │ -│ │ decode TokenTransferEvent → classify MINT | BURN | TRANSFER │ │ -│ │ apply package whitelist filter │ │ -│ └────────────────────────────────┬────────────────────────────────────┘ │ -│ │ chan []ParsedEvent (per tx) │ -│ ┌─────────────────────────────────▼───────────────────────────────────┐ │ -│ │ pkg/indexer/processor │ │ -│ │ BEGIN TX │ │ -│ │ INSERT 
transfer_events (idempotent via event_id UNIQUE) │ │ -│ │ UPSERT token_balances (±delta by canton_party_id) │ │ -│ │ UPSERT token_stats (total supply) │ │ -│ │ UPDATE ledger_checkpoints │ │ -│ │ COMMIT ← checkpoint committed atomically with events │ │ -│ └─────────────────────────────────┬───────────────────────────────────┘ │ -│ │ │ -│ PostgreSQL │ -│ │ │ -│ ┌──────────────────────────────────▼──────────────────────────────────┐ │ -│ │ pkg/indexer/service (Canton-native query service) │ │ -│ │ All queries keyed by canton_party_id — no EVM address, no user │ │ -│ │ table. Caller is responsible for resolving EVM → party_id. │ │ -│ └──────────────────────────────────┬──────────────────────────────────┘ │ -│ │ │ -│ ┌───────────────────────────────────▼─────────────────────────────────┐ │ -│ │ pkg/indexer/api — HTTP :8082 (chi router) │ │ -│ │ │ │ -│ │ Auth: JWT validation only (shared JWKS with api-server) │ │ -│ │ Claims must contain canton_party_id. │ │ -│ │ No userstore. No EVM sig verification. │ │ -│ │ │ │ -│ │ [public] GET /v1/tokens │ │ -│ │ [public] GET /v1/tokens/{symbol} │ │ -│ │ [public] GET /v1/tokens/{symbol}/totalSupply │ │ -│ │ [JWT] GET /v1/balance/{partyID}[/{symbol}] │ │ -│ │ [JWT] GET /v1/transfers/{partyID}[/{symbol}] │ │ -│ │ [JWT] GET /v1/events/{partyID} │ │ -│ │ GET /health GET /metrics │ │ -│ │ (Phase 2: add /graph for GraphQL) │ │ -│ └─────────────────────────────────────────────────────────────────────┘ │ -└───────────────────────────────────────────────────────────────────────────┘ - ▲ ▲ - Canton Ledger API v2 Callers (api-server or direct clients) - gRPC server-streaming JWT must contain canton_party_id claim - -How api-server uses the indexer: - EVM client → api-server (EVM sig auth + userstore lookup) - → api-server mints JWT with canton_party_id claim - → api-server calls indexer /v1/balance/{partyID} with that JWT - ← indexer returns balance for that party - ← api-server returns result to EVM client -``` - ---- - -## 5. 
DAML Contract Change — Unified `TokenTransferEvent` - -### New template in `Events.daml` - -```daml --- contracts/canton-erc20/daml/cip56-token/src/CIP56/Events.daml - --- Unified transfer event covering mint, burn, and transfer. --- Mirrors ERC-20 Transfer(from, to, value): --- MINT: fromParty = None, toParty = Some recipient --- BURN: fromParty = Some owner, toParty = None --- TRANSFER: fromParty = Some sender, toParty = Some receiver -template TokenTransferEvent - with - issuer : Party - fromParty : Optional Party -- None for mints - toParty : Optional Party -- None for burns - amount : Decimal - tokenSymbol : Text - eventType : Text -- "MINT" | "BURN" | "TRANSFER" - timestamp : Time - evmTxHash : Optional Text -- bridge deposit tx hash (mints only) - evmDestination : Optional Text -- bridge withdrawal address (burns only) - userFingerprint : Optional Text -- EVM fingerprint, stored for bridge audit only - auditObservers : [Party] - where - signatory issuer - observer - optional [] (\p -> [p]) fromParty, - optional [] (\p -> [p]) toParty, - auditObservers -``` - -### Emit from `TokenConfig.IssuerMint` — no return type change - -```daml --- Config.daml — inside IssuerMint do-block, AFTER creating MintEvent: - _ <- create TokenTransferEvent with - issuer - fromParty = None - toParty = Some recipient - amount - tokenSymbol = getSymbol meta - eventType = "MINT" - timestamp = eventTime - evmTxHash - evmDestination = None - userFingerprint = Some userFingerprint - auditObservers - pure (holdingCid, eventCid) -- return type UNCHANGED -``` - -### Emit from `TokenConfig.IssuerBurn` — no return type change - -```daml --- Config.daml — inside IssuerBurn do-block, AFTER creating BurnEvent: - _ <- create TokenTransferEvent with - issuer - fromParty = Some holding.owner - toParty = None - amount - tokenSymbol = getSymbol meta - eventType = "BURN" - timestamp = eventTime - evmTxHash = None - evmDestination - userFingerprint = Some userFingerprint - auditObservers - pure 
(remainderCid, eventCid) -- return type UNCHANGED -``` - -### Emit from `CIP56TransferFactory.transferFactory_transferImpl` - -```daml --- TransferFactory.daml — AFTER creating receiverCid: - _ <- create TokenTransferEvent with - issuer = admin - fromParty = Some sender - toParty = Some receiver - amount - tokenSymbol = instrumentId.id - eventType = "TRANSFER" - timestamp = now - evmTxHash = None - evmDestination = None - userFingerprint = None -- pure Canton transfer, no EVM context - auditObservers = [] - pure TransferInstructionResult with ... -- return type UNCHANGED -``` - -> Existing `MintEvent` and `BurnEvent` are kept intact for the reconciler and bridge -> relayer during the migration window. - ---- - -## 6. Component Deep-Dive - -### 6.1 cantonsdk Streaming Client (new package) - -Mirrors `StreamWithdrawalEvents` in `pkg/cantonsdk/bridge/client.go` exactly — same -backoff, same auth invalidation on 401, same reconnect-from-offset logic — but generic -enough for any template subscription. - -```go -// pkg/cantonsdk/streaming/client.go -package streaming - -// LedgerTransaction is a decoded, typed transaction from the GetUpdates stream. -type LedgerTransaction struct { - UpdateID string - Offset int64 - EffectiveTime time.Time - Events []LedgerEvent -} - -// LedgerEvent is a single created or archived contract within a transaction. -type LedgerEvent struct { - ContractID string - PackageID string - ModuleName string - TemplateName string - IsCreated bool - Created *lapiv2.CreatedEvent // set when IsCreated=true - Archived *lapiv2.ArchivedEvent // set when IsCreated=false -} - -// SubscribeRequest configures which templates to stream and from where. -type SubscribeRequest struct { - FromOffset int64 - TemplateIDs []*lapiv2.Identifier -} - -// Client wraps UpdateService.GetUpdates with reconnection and auth handling. 
-type Client struct { - ledger ledger.Ledger - party string -} - -func New(l ledger.Ledger, party string) *Client { - return &Client{ledger: l, party: party} -} - -// Subscribe opens a live stream against the Canton ledger. -// Reconnects automatically with exponential backoff (5s → 60s, mirrors bridge client). -// lastOffset is updated after each received transaction. The caller commits it to DB -// so reconnects resume from the last safe point. -func (c *Client) Subscribe( - ctx context.Context, - req SubscribeRequest, - lastOffset *int64, -) <-chan *LedgerTransaction { - out := make(chan *LedgerTransaction, 100) - go func() { - defer close(out) - backoff := newExponentialBackoff(5*time.Second, 60*time.Second) - for { - err := c.runStream(ctx, &req, lastOffset, out) - if ctx.Err() != nil { - return - } - // Reload offset — processor commits it to DB on each batch - atomic.StoreInt64(&req.FromOffset, atomic.LoadInt64(lastOffset)) - log.Warn("canton stream disconnected, reconnecting", - "err", err, "resume_offset", req.FromOffset) - backoff.Wait(ctx) - } - }() - return out -} - -func (c *Client) runStream( - ctx context.Context, - req *SubscribeRequest, - lastOffset *int64, - out chan<- *LedgerTransaction, -) error { - authCtx, err := c.ledger.AuthContext(ctx) - if err != nil { - return fmt.Errorf("auth: %w", err) - } - stream, err := c.ledger.Update().GetUpdates(authCtx, &lapiv2.GetUpdatesRequest{ - BeginExclusive: req.FromOffset, - UpdateFormat: &lapiv2.UpdateFormat{ - IncludeTransactions: &lapiv2.TransactionFormat{ - EventFormat: &lapiv2.EventFormat{ - FiltersByParty: map[string]*lapiv2.Filters{ - c.party: buildTemplateFilters(req.TemplateIDs), - }, - Verbose: true, - }, - TransactionShape: lapiv2.TransactionShape_TRANSACTION_SHAPE_ACS_DELTA, - }, - }, - }) - if err != nil { - if isAuthError(err) { - c.ledger.InvalidateToken() - } - return err - } - for { - resp, err := stream.Recv() - if err != nil { - if isAuthError(err) { - c.ledger.InvalidateToken() - } - 
return err - } - tx := resp.GetTransaction() - if tx == nil { - continue // checkpoint or topology event — skip - } - lt := decodeLedgerTransaction(tx) - atomic.StoreInt64(lastOffset, lt.Offset) - select { - case out <- lt: - case <-ctx.Done(): - return ctx.Err() - } - } -} -``` - -### 6.2 Fetcher - -Thin wrapper — loads checkpoint offset from DB, builds the template filter, delegates to -`cantonsdk/streaming`. No business logic here. - -```go -// pkg/indexer/fetcher/fetcher.go -package fetcher - -type Fetcher struct { - streaming *streaming.Client - store store.Store - templateID *lapiv2.Identifier // TokenTransferEvent fully-resolved ID - out chan<- *streaming.LedgerTransaction -} - -func New( - s *streaming.Client, - st store.Store, - tplID *lapiv2.Identifier, - out chan<- *streaming.LedgerTransaction, -) *Fetcher { - return &Fetcher{streaming: s, store: st, templateID: tplID, out: out} -} - -func (f *Fetcher) Start(ctx context.Context) error { - cp, err := f.store.GetCheckpoint(ctx) - if err != nil { - return fmt.Errorf("load checkpoint: %w", err) - } - var lastOffset int64 = cp.LastProcessedOffset - - events := f.streaming.Subscribe(ctx, streaming.SubscribeRequest{ - FromOffset: lastOffset, - TemplateIDs: []*lapiv2.Identifier{f.templateID}, - }, &lastOffset) - - for { - select { - case tx, ok := <-events: - if !ok { - return nil - } - select { - case f.out <- tx: - case <-ctx.Done(): - return ctx.Err() - } - case <-ctx.Done(): - return ctx.Err() - } - } -} -``` - -### 6.3 Parser - -Since the indexer subscribes only to `TokenTransferEvent`, parsing is a straightforward -DAML record decode using the existing `cantonsdk/values` helpers. No inference needed. - -```go -// pkg/indexer/parser/types.go -package parser - -type EventType string - -const ( - EventMint EventType = "MINT" - EventBurn EventType = "BURN" - EventTransfer EventType = "TRANSFER" -) - -// ParsedEvent is a fully decoded TokenTransferEvent, ready for the processor. 
-// Primary identity is always canton_party_id — no EVM address at this layer. -type ParsedEvent struct { - EventType EventType - TokenSymbol string - Amount string // decimal string, e.g. "1.5" - FromPartyID *string // nil for mints - ToPartyID *string // nil for burns - UserFingerprint *string // from DAML event — stored for bridge audit, not for auth - EVMTxHash *string // bridge deposit - EVMDestination *string // bridge withdrawal - ContractID string // unique idempotency key (TokenTransferEvent contract ID) - TxID string - LedgerOffset int64 - EffectiveTime time.Time -} -``` - -```go -// pkg/indexer/parser/cip56.go -package parser - -// Uses cantonsdk/values helpers (values.RecordToMap, values.Text, etc.) -// — same pattern as bridge/decode.go -func decodeTokenTransferEvent(ce *lapiv2.CreatedEvent, tx *streaming.LedgerTransaction) *ParsedEvent { - fields := values.RecordToMap(ce.CreateArguments) - - fromParty := optionalParty(fields["fromParty"]) - toParty := optionalParty(fields["toParty"]) - - var et EventType - switch { - case fromParty == nil && toParty != nil: - et = EventMint - case fromParty != nil && toParty == nil: - et = EventBurn - default: - et = EventTransfer - } - - amount, _ := values.Numeric(fields["amount"]) - return &ParsedEvent{ - EventType: et, - TokenSymbol: values.Text(fields["tokenSymbol"]), - Amount: amount.String(), - FromPartyID: fromParty, - ToPartyID: toParty, - UserFingerprint: optionalText(fields["userFingerprint"]), - EVMTxHash: optionalText(fields["evmTxHash"]), - EVMDestination: optionalText(fields["evmDestination"]), - ContractID: ce.ContractId, - TxID: tx.UpdateID, - LedgerOffset: tx.Offset, - EffectiveTime: tx.EffectiveTime, - } -} -``` - -### 6.4 Processor - -Atomic batch writer. Checkpoint update is inside the same DB transaction as the event -writes — guarantees exactly-once processing on restart. 
- -```go -// pkg/indexer/processor/processor.go -package processor - -func (proc *Processor) processBatch(ctx context.Context, events []*parser.ParsedEvent) error { - if len(events) == 0 { - return nil - } - lastOffset := events[len(events)-1].LedgerOffset - - return proc.store.RunInTx(ctx, func(ctx context.Context, tx store.Tx) error { - for _, ev := range events { - if err := proc.processEvent(ctx, tx, ev); err != nil { - return fmt.Errorf("event %s: %w", ev.ContractID, err) - } - } - return tx.UpdateCheckpoint(ctx, lastOffset) - }) -} - -func (proc *Processor) processEvent(ctx context.Context, tx store.Tx, ev *parser.ParsedEvent) error { - // Idempotent insert — ON CONFLICT (event_id) DO NOTHING - inserted, err := tx.InsertTransferEvent(ctx, toTransferEventDao(ev)) - if err != nil { - return err - } - if !inserted { - return nil // already committed in a previous run - } - - switch ev.EventType { - case parser.EventMint: - if err := tx.IncrementBalance(ctx, *ev.ToPartyID, ev.TokenSymbol, ev.Amount); err != nil { - return err - } - return tx.IncrementTotalSupply(ctx, ev.TokenSymbol, ev.Amount) - - case parser.EventBurn: - if err := tx.DecrementBalance(ctx, *ev.FromPartyID, ev.TokenSymbol, ev.Amount); err != nil { - return err - } - return tx.DecrementTotalSupply(ctx, ev.TokenSymbol, ev.Amount) - - case parser.EventTransfer: - if err := tx.DecrementBalance(ctx, *ev.FromPartyID, ev.TokenSymbol, ev.Amount); err != nil { - return err - } - return tx.IncrementBalance(ctx, *ev.ToPartyID, ev.TokenSymbol, ev.Amount) - } - return nil -} -``` - ---- - -### 6.5 Store — Models & PostgreSQL - -#### `pkg/indexer/store/model.go` - -DAOs follow the exact Bun ORM pattern from `pkg/reconciler/store/model.go`. -**No EVM address in `TokenBalanceDao`** — the indexer is Canton-native. `evm_address` -is the api-server's concern. - -```go -// pkg/indexer/store/model.go -package store - -import ( - "time" - "github.com/uptrace/bun" -) - -// LedgerCheckpointDao — single-row table. 
Offset committed atomically with each -// processed batch, guaranteeing safe restart from this point. -type LedgerCheckpointDao struct { - bun.BaseModel `bun:"table:ledger_checkpoints,alias:lc"` - ID int `bun:"id,pk,default:1"` - LastProcessedOffset int64 `bun:"last_processed_offset,notnull,default:0"` - LastTxID *string `bun:"last_tx_id,type:varchar(255)"` - UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` -} - -// IndexedTokenDao — registry of token contracts being indexed. -type IndexedTokenDao struct { - bun.BaseModel `bun:"table:indexed_tokens,alias:it"` - PackageID string `bun:"package_id,pk,type:varchar(255)"` - TokenSymbol string `bun:"token_symbol,unique,notnull,type:varchar(50)"` - ModuleName string `bun:"module_name,notnull,type:varchar(255)"` - TemplateName string `bun:"template_name,notnull,type:varchar(255)"` - Name *string `bun:"name,type:varchar(255)"` - Decimals int16 `bun:"decimals,notnull,default:18"` - IssuerPartyID *string `bun:"issuer_party_id,type:varchar(255)"` - AddedAt time.Time `bun:"added_at,nullzero,default:current_timestamp"` -} - -// TransferEventDao — append-only event log. -// event_id = Canton contract ID of the TokenTransferEvent (globally unique). -// fingerprint is stored only because it comes from the DAML event itself (bridge audit). -// It is NOT used for auth or party resolution inside the indexer. 
-type TransferEventDao struct { - bun.BaseModel `bun:"table:transfer_events,alias:te"` - ID int64 `bun:"id,pk,autoincrement"` - EventID string `bun:"event_id,unique,notnull,type:varchar(512)"` - EventType string `bun:"event_type,notnull,type:varchar(20)"` // MINT|BURN|TRANSFER - TokenSymbol string `bun:"token_symbol,notnull,type:varchar(50)"` - Amount string `bun:"amount,notnull,type:numeric(38,18)"` - FromPartyID *string `bun:"from_party_id,type:varchar(255)"` // nil for mints - ToPartyID *string `bun:"to_party_id,type:varchar(255)"` // nil for burns - Fingerprint *string `bun:"fingerprint,type:varchar(128)"` // from DAML event - EVMTxHash *string `bun:"evm_tx_hash,type:varchar(255)"` - EVMDestination *string `bun:"evm_destination,type:varchar(42)"` - TransactionID *string `bun:"transaction_id,type:varchar(255)"` - LedgerOffset int64 `bun:"ledger_offset,notnull"` - EffectiveTime time.Time `bun:"effective_time,notnull"` - IndexedAt time.Time `bun:"indexed_at,nullzero,default:current_timestamp"` -} - -// TokenBalanceDao — incremental balance cache per party per token. -// Primary key: (party_id, token_symbol). -// NO evm_address — the indexer is Canton-native. EVM mapping is the api-server's job. -type TokenBalanceDao struct { - bun.BaseModel `bun:"table:token_balances,alias:tb"` - PartyID string `bun:"party_id,pk,type:varchar(255)"` - TokenSymbol string `bun:"token_symbol,pk,type:varchar(50)"` - Balance string `bun:"balance,notnull,default:0,type:numeric(38,18)"` - UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` -} - -// TokenStatDao — aggregate stats per token. 
-type TokenStatDao struct { - bun.BaseModel `bun:"table:token_stats,alias:ts"` - TokenSymbol string `bun:"token_symbol,pk,type:varchar(50)"` - TotalSupply string `bun:"total_supply,notnull,default:0,type:numeric(38,18)"` - HolderCount int64 `bun:"holder_count,notnull,default:0"` - UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` -} -``` - -#### `pkg/indexer/store/store.go` - -```go -// pkg/indexer/store/store.go -package store - -import "context" - -//go:generate mockery --name Store --output ./mocks -type Store interface { - GetCheckpoint(ctx context.Context) (*LedgerCheckpointDao, error) - // Queries are keyed by canton_party_id — no EVM address resolution here. - GetTokenBalance(ctx context.Context, partyID, tokenSymbol string) (*TokenBalanceDao, error) - GetTokenStat(ctx context.Context, tokenSymbol string) (*TokenStatDao, error) - ListIndexedTokens(ctx context.Context) ([]*IndexedTokenDao, error) - ListTransferEvents(ctx context.Context, filter TransferEventFilter) ([]*TransferEventDao, int, error) - UpsertIndexedToken(ctx context.Context, dao *IndexedTokenDao) error - RunInTx(ctx context.Context, fn func(ctx context.Context, tx Tx) error) error -} - -//go:generate mockery --name Tx --output ./mocks -type Tx interface { - InsertTransferEvent(ctx context.Context, dao *TransferEventDao) (inserted bool, err error) - IncrementBalance(ctx context.Context, partyID, tokenSymbol, amount string) error - DecrementBalance(ctx context.Context, partyID, tokenSymbol, amount string) error - IncrementTotalSupply(ctx context.Context, tokenSymbol, amount string) error - DecrementTotalSupply(ctx context.Context, tokenSymbol, amount string) error - UpdateCheckpoint(ctx context.Context, offset int64) error -} - -// TransferEventFilter — all fields keyed by canton_party_id, not EVM address. 
-type TransferEventFilter struct { - PartyID *string // filter events where from_party_id OR to_party_id = this - TokenSymbol *string - EventType *string - Page int - PageSize int -} -``` - -#### `pkg/indexer/store/pg.go` (key methods) - -```go -// pkg/indexer/store/pg.go -package store - -type pgStore struct{ db *bun.DB } - -func NewStore(db *bun.DB) Store { return &pgStore{db: db} } - -func (s *pgStore) GetTokenBalance(ctx context.Context, partyID, tokenSymbol string) (*TokenBalanceDao, error) { - dao := new(TokenBalanceDao) - err := s.db.NewSelect().Model(dao). - Where("party_id = ? AND token_symbol = ?", partyID, tokenSymbol). - Scan(ctx) - if errors.Is(err, sql.ErrNoRows) { - return &TokenBalanceDao{PartyID: partyID, TokenSymbol: tokenSymbol, Balance: "0"}, nil - } - return dao, err -} - -func (s *pgStore) ListTransferEvents(ctx context.Context, f TransferEventFilter) ([]*TransferEventDao, int, error) { - var rows []*TransferEventDao - q := s.db.NewSelect().Model(&rows).OrderExpr("ledger_offset DESC") - - if f.PartyID != nil { - q = q.Where("(from_party_id = ? OR to_party_id = ?)", *f.PartyID, *f.PartyID) - } - if f.TokenSymbol != nil { - q = q.Where("token_symbol = ?", *f.TokenSymbol) - } - if f.EventType != nil { - q = q.Where("event_type = ?", *f.EventType) - } - - total, err := q.Count(ctx) - if err != nil { - return nil, 0, fmt.Errorf("count events: %w", err) - } - - pageSize := f.PageSize - if pageSize <= 0 { pageSize = 20 } - page := f.Page - if page <= 0 { page = 1 } - - err = q.Limit(pageSize).Offset((page - 1) * pageSize).Scan(ctx) - return rows, total, err -} - -func (s *pgStore) RunInTx(ctx context.Context, fn func(ctx context.Context, tx Tx) error) error { - return s.db.RunInTx(ctx, nil, func(ctx context.Context, bunTx bun.Tx) error { - return fn(ctx, &pgTx{db: bunTx}) - }) -} - -type pgTx struct{ db bun.Tx } - -func (t *pgTx) InsertTransferEvent(ctx context.Context, dao *TransferEventDao) (bool, error) { - res, err := t.db.NewInsert().Model(dao). 
- On("CONFLICT (event_id) DO NOTHING"). - Exec(ctx) - if err != nil { - return false, fmt.Errorf("insert transfer event: %w", err) - } - rows, _ := res.RowsAffected() - return rows > 0, nil -} - -func (t *pgTx) IncrementBalance(ctx context.Context, partyID, tokenSymbol, amount string) error { - _, err := t.db.NewInsert(). - TableExpr("token_balances"). - ColumnExpr("party_id, token_symbol, balance, updated_at"). - Value("?, ?, ?, NOW()", partyID, tokenSymbol, amount). - On("CONFLICT (party_id, token_symbol) DO UPDATE"). - Set("balance = token_balances.balance + EXCLUDED.balance"). - Set("updated_at = NOW()"). - Exec(ctx) - return err -} - -func (t *pgTx) DecrementBalance(ctx context.Context, partyID, tokenSymbol, amount string) error { - _, err := t.db.NewUpdate().TableExpr("token_balances"). - Set("balance = balance - ?", amount). - Set("updated_at = NOW()"). - Where("party_id = ? AND token_symbol = ?", partyID, tokenSymbol). - Exec(ctx) - return err -} - -func (t *pgTx) UpdateCheckpoint(ctx context.Context, offset int64) error { - _, err := t.db.NewUpdate().Model((*LedgerCheckpointDao)(nil)). - Set("last_processed_offset = ?", offset). - Set("updated_at = NOW()"). - Where("id = 1"). - Exec(ctx) - return err -} -``` - ---- - -### 6.6 Database Migrations (Go code) - -Package `indexerdb`, same pattern as `pkg/migrations/apidb/`. -Inline DAO structs per migration file keep migrations self-contained. 
- -```go -// pkg/migrations/indexerdb/migrations.go -package indexerdb - -import "github.com/uptrace/bun/migrate" - -var Migrations = migrate.NewMigrations() -``` - -```go -// pkg/migrations/indexerdb/1_create_ledger_checkpoints.go -package indexerdb - -import ( - "context" - "log" - "time" - - mghelper "github.com/chainsafe/canton-middleware/pkg/pgutil/migrations" - "github.com/uptrace/bun" -) - -func init() { - Migrations.MustRegister( - func(ctx context.Context, db *bun.DB) error { - log.Println("creating ledger_checkpoints table...") - type dao struct { - bun.BaseModel `bun:"table:ledger_checkpoints"` - ID int `bun:"id,pk,default:1"` - LastProcessedOffset int64 `bun:"last_processed_offset,notnull,default:0"` - LastTxID *string `bun:"last_tx_id,type:varchar(255)"` - UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` - } - if err := mghelper.CreateSchema(ctx, db, (*dao)(nil)); err != nil { - return err - } - _, err := db.ExecContext(ctx, - `INSERT INTO ledger_checkpoints (id, last_processed_offset) - VALUES (1, 0) ON CONFLICT DO NOTHING;`) - return err - }, - func(ctx context.Context, db *bun.DB) error { - log.Println("dropping ledger_checkpoints table...") - type dao struct { - bun.BaseModel `bun:"table:ledger_checkpoints"` - } - return mghelper.DropTables(ctx, db, (*dao)(nil)) - }, - ) -} -``` - -```go -// pkg/migrations/indexerdb/2_create_indexed_tokens.go -package indexerdb - -import ( - "context" - "log" - "time" - - mghelper "github.com/chainsafe/canton-middleware/pkg/pgutil/migrations" - "github.com/uptrace/bun" -) - -func init() { - Migrations.MustRegister( - func(ctx context.Context, db *bun.DB) error { - log.Println("creating indexed_tokens table...") - type dao struct { - bun.BaseModel `bun:"table:indexed_tokens"` - PackageID string `bun:"package_id,pk,type:varchar(255)"` - TokenSymbol string `bun:"token_symbol,unique,notnull,type:varchar(50)"` - ModuleName string `bun:"module_name,notnull,type:varchar(255)"` - TemplateName 
string `bun:"template_name,notnull,type:varchar(255)"` - Name *string `bun:"name,type:varchar(255)"` - Decimals int16 `bun:"decimals,notnull,default:18"` - IssuerPartyID *string `bun:"issuer_party_id,type:varchar(255)"` - AddedAt time.Time `bun:"added_at,nullzero,default:current_timestamp"` - } - return mghelper.CreateSchema(ctx, db, (*dao)(nil)) - }, - func(ctx context.Context, db *bun.DB) error { - log.Println("dropping indexed_tokens table...") - type dao struct { - bun.BaseModel `bun:"table:indexed_tokens"` - } - return mghelper.DropTables(ctx, db, (*dao)(nil)) - }, - ) -} -``` - -```go -// pkg/migrations/indexerdb/3_create_transfer_events.go -package indexerdb - -import ( - "context" - "log" - "time" - - mghelper "github.com/chainsafe/canton-middleware/pkg/pgutil/migrations" - "github.com/uptrace/bun" -) - -func init() { - Migrations.MustRegister( - func(ctx context.Context, db *bun.DB) error { - log.Println("creating transfer_events table...") - type dao struct { - bun.BaseModel `bun:"table:transfer_events"` - ID int64 `bun:"id,pk,autoincrement"` - EventID string `bun:"event_id,unique,notnull,type:varchar(512)"` - EventType string `bun:"event_type,notnull,type:varchar(20)"` - TokenSymbol string `bun:"token_symbol,notnull,type:varchar(50)"` - Amount string `bun:"amount,notnull,type:numeric(38,18)"` - FromPartyID *string `bun:"from_party_id,type:varchar(255)"` - ToPartyID *string `bun:"to_party_id,type:varchar(255)"` - Fingerprint *string `bun:"fingerprint,type:varchar(128)"` - EVMTxHash *string `bun:"evm_tx_hash,type:varchar(255)"` - EVMDestination *string `bun:"evm_destination,type:varchar(42)"` - TransactionID *string `bun:"transaction_id,type:varchar(255)"` - LedgerOffset int64 `bun:"ledger_offset,notnull"` - EffectiveTime time.Time `bun:"effective_time,notnull"` - IndexedAt time.Time `bun:"indexed_at,nullzero,default:current_timestamp"` - } - if err := mghelper.CreateSchema(ctx, db, (*dao)(nil)); err != nil { - return err - } - // Indexes: all events for a 
party (sent or received), fingerprint, bridge - if err := mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "from_party_id", "token_symbol"); err != nil { - return err - } - if err := mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "to_party_id", "token_symbol"); err != nil { - return err - } - if err := mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "fingerprint"); err != nil { - return err - } - if err := mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "evm_tx_hash"); err != nil { - return err - } - return mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "ledger_offset") - }, - func(ctx context.Context, db *bun.DB) error { - log.Println("dropping transfer_events table...") - type dao struct { - bun.BaseModel `bun:"table:transfer_events"` - } - return mghelper.DropTables(ctx, db, (*dao)(nil)) - }, - ) -} -``` - -```go -// pkg/migrations/indexerdb/4_create_token_balances.go -package indexerdb - -import ( - "context" - "log" - "time" - - mghelper "github.com/chainsafe/canton-middleware/pkg/pgutil/migrations" - "github.com/uptrace/bun" -) - -func init() { - Migrations.MustRegister( - func(ctx context.Context, db *bun.DB) error { - log.Println("creating token_balances table...") - // NOTE: No evm_address column. The indexer is Canton-native. - // EVM address resolution is the api-server's responsibility. 
- type dao struct { - bun.BaseModel `bun:"table:token_balances"` - PartyID string `bun:"party_id,pk,type:varchar(255)"` - TokenSymbol string `bun:"token_symbol,pk,type:varchar(50)"` - Balance string `bun:"balance,notnull,default:0,type:numeric(38,18)"` - UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` - } - if err := mghelper.CreateSchema(ctx, db, (*dao)(nil)); err != nil { - return err - } - return mghelper.CreateModelIndexes(ctx, db, (*dao)(nil), "token_symbol") - }, - func(ctx context.Context, db *bun.DB) error { - log.Println("dropping token_balances table...") - type dao struct { - bun.BaseModel `bun:"table:token_balances"` - } - return mghelper.DropTables(ctx, db, (*dao)(nil)) - }, - ) -} -``` - -```go -// pkg/migrations/indexerdb/5_create_token_stats.go -package indexerdb - -import ( - "context" - "log" - "time" - - mghelper "github.com/chainsafe/canton-middleware/pkg/pgutil/migrations" - "github.com/uptrace/bun" -) - -func init() { - Migrations.MustRegister( - func(ctx context.Context, db *bun.DB) error { - log.Println("creating token_stats table...") - type dao struct { - bun.BaseModel `bun:"table:token_stats"` - TokenSymbol string `bun:"token_symbol,pk,type:varchar(50)"` - TotalSupply string `bun:"total_supply,notnull,default:0,type:numeric(38,18)"` - HolderCount int64 `bun:"holder_count,notnull,default:0"` - UpdatedAt time.Time `bun:"updated_at,nullzero,default:current_timestamp"` - } - return mghelper.CreateSchema(ctx, db, (*dao)(nil)) - }, - func(ctx context.Context, db *bun.DB) error { - log.Println("dropping token_stats table...") - type dao struct { - bun.BaseModel `bun:"table:token_stats"` - } - return mghelper.DropTables(ctx, db, (*dao)(nil)) - }, - ) -} -``` - ---- - -### 6.7 Service Layer - -Canton-native query service. All methods keyed by `canton_party_id`. No userstore. -No EVM address. Mirrors `pkg/token/service.go` in structure. 
- -```go -// pkg/indexer/service/service.go -package service - -import ( - "context" - "github.com/chainsafe/canton-middleware/pkg/indexer/store" -) - -//go:generate mockery --name Service --output ./mocks -type Service interface { - ListTokens(ctx context.Context) ([]*TokenInfo, error) - GetTokenInfo(ctx context.Context, tokenSymbol string) (*TokenInfo, error) - // All balance/history queries take canton_party_id as the primary identifier. - // The caller (api-server) is responsible for resolving EVM address → party_id - // before calling these methods. - GetBalance(ctx context.Context, partyID, tokenSymbol string) (*Balance, error) - GetTransferHistory(ctx context.Context, partyID string, filter TransferFilter) (*TransferPage, error) -} - -type indexerService struct { - store store.Store -} - -func NewService(s store.Store) Service { - return &indexerService{store: s} -} - -func (s *indexerService) GetBalance(ctx context.Context, partyID, tokenSymbol string) (*Balance, error) { - dao, err := s.store.GetTokenBalance(ctx, partyID, tokenSymbol) - if err != nil { - return nil, fmt.Errorf("get balance: %w", err) - } - token, err := s.store.GetTokenStat(ctx, tokenSymbol) - if err != nil { - return nil, fmt.Errorf("get token: %w", err) - } - return toBalance(dao, token), nil -} - -func (s *indexerService) GetTransferHistory(ctx context.Context, partyID string, f TransferFilter) (*TransferPage, error) { - rows, total, err := s.store.ListTransferEvents(ctx, store.TransferEventFilter{ - PartyID: &partyID, - TokenSymbol: f.TokenSymbol, - EventType: f.EventType, - Page: f.Page, - PageSize: f.PageSize, - }) - if err != nil { - return nil, fmt.Errorf("get transfer history: %w", err) - } - return toTransferPage(rows, total, f), nil -} -``` - -```go -// pkg/indexer/service/types.go -package service - -type TokenInfo struct { - Symbol string `json:"symbol"` - Name string `json:"name"` - Decimals int `json:"decimals"` - TotalSupply string `json:"total_supply"` - HolderCount int64 
`json:"holder_count"` -} - -// Balance is keyed by canton_party_id. The api-server maps EVM → party_id before -// calling the indexer and may re-map the response back to EVM context for its clients. -type Balance struct { - PartyID string `json:"party_id"` - TokenSymbol string `json:"token_symbol"` - Balance string `json:"balance"` // raw (18 decimals) - BalanceFormatted string `json:"balance_formatted"` // human readable - Decimals int `json:"decimals"` -} - -type TransferEvent struct { - EventID string `json:"event_id"` - EventType string `json:"event_type"` // MINT | BURN | TRANSFER - FromPartyID *string `json:"from_party_id"` - ToPartyID *string `json:"to_party_id"` - Amount string `json:"amount"` - AmountFormatted string `json:"amount_formatted"` - TokenSymbol string `json:"token_symbol"` - EVMTxHash *string `json:"evm_tx_hash,omitempty"` - LedgerOffset int64 `json:"ledger_offset"` - EffectiveTime string `json:"effective_time"` -} - -type TransferPage struct { - Total int `json:"total"` - Page int `json:"page"` - PageSize int `json:"page_size"` - Events []TransferEvent `json:"events"` -} - -type TransferFilter struct { - TokenSymbol *string - EventType *string - Page int - PageSize int -} -``` - ---- - -### 6.8 API / HTTP Layer - -**Auth: JWT only.** The JWT is issued by the api-server (or a shared auth service) after -authenticating the user via EVM signature. The JWT claims must contain `canton_party_id`. -The indexer validates the JWT signature against the shared JWKS endpoint and extracts -`canton_party_id` from claims — no userstore, no EVM signature verification here. - -**Endpoints use `partyID` as path param**, not EVM address. The api-server is the -translator between EVM world and Canton-native world. 
- -```go -// pkg/indexer/api/server.go -package api - -func RegisterRoutes(r chi.Router, svc service.Service, cfg AuthConfig, logger *zap.Logger) { - h := newHandler(svc, logger) - - // Public — no auth (totalSupply is public per ERC-20 spec) - r.Get("/v1/tokens", h.listTokens) - r.Get("/v1/tokens/{symbol}", h.getToken) - r.Get("/v1/tokens/{symbol}/totalSupply", h.getTotalSupply) - - // JWT-authenticated — scoped to the party_id in the JWT claims - r.Group(func(r chi.Router) { - r.Use(authMiddleware(cfg)) - r.Get("/v1/balance/{partyID}", h.getBalance) - r.Get("/v1/balance/{partyID}/{symbol}", h.getBalanceBySymbol) - r.Get("/v1/transfers/{partyID}", h.getTransfers) - r.Get("/v1/transfers/{partyID}/{symbol}", h.getTransfersBySymbol) - r.Get("/v1/events/{partyID}", h.getTransfers) // alias - }) -} -``` - -```go -// pkg/indexer/api/middleware.go -package api - -// AuthConfig holds the JWKS URL for JWT validation. -// No userstore reference — the indexer does not know about EVM addresses. -type AuthConfig struct { - JWKSUrl string -} - -// Claims are extracted from the JWT. The JWT is issued by the api-server -// and must carry canton_party_id so the indexer can scope queries. -type Claims struct { - CantonPartyID string `json:"canton_party_id"` - // Other standard JWT fields (exp, iat, sub) handled by the JWT library -} - -type principalKey struct{} - -// authMiddleware validates the JWT and stores the party_id in context. -// Only JWT Bearer tokens are accepted — no EVM signature verification. 
-func authMiddleware(cfg AuthConfig) func(http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - bearer := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ") - if bearer == "" { - writeError(w, http.StatusUnauthorized, errors.New("Bearer token required")) - return - } - claims, err := validateJWT(bearer, cfg.JWKSUrl) - if err != nil { - writeError(w, http.StatusUnauthorized, err) - return - } - if claims.CantonPartyID == "" { - writeError(w, http.StatusUnauthorized, - errors.New("JWT missing canton_party_id claim")) - return - } - ctx := context.WithValue(r.Context(), principalKey{}, claims.CantonPartyID) - next.ServeHTTP(w, r.WithContext(ctx)) - }) - } -} - -// scopeCheck ensures the authenticated party can only read its own data. -func scopeCheck(r *http.Request, requestedPartyID string) error { - partyID, ok := r.Context().Value(principalKey{}).(string) - if !ok || partyID == "" { - return errors.New("unauthenticated") - } - if partyID != requestedPartyID { - return errors.New("access denied: can only query own party data") - } - return nil -} -``` - -```go -// pkg/indexer/api/handler.go -package api - -func (h *handler) getBalance(w http.ResponseWriter, r *http.Request) { - partyID := chi.URLParam(r, "partyID") - if err := scopeCheck(r, partyID); err != nil { - writeError(w, http.StatusForbidden, err) - return - } - symbol := chi.URLParam(r, "symbol") // may be "" for all-tokens variant - bal, err := h.svc.GetBalance(r.Context(), partyID, symbol) - if err != nil { - writeError(w, http.StatusInternalServerError, err) - return - } - writeJSON(w, http.StatusOK, bal) -} - -func (h *handler) getTransfers(w http.ResponseWriter, r *http.Request) { - partyID := chi.URLParam(r, "partyID") - if err := scopeCheck(r, partyID); err != nil { - writeError(w, http.StatusForbidden, err) - return - } - page, _ := strconv.Atoi(r.URL.Query().Get("page")) - pageSize, _ := 
strconv.Atoi(r.URL.Query().Get("page_size")) - symbol := r.URL.Query().Get("token") - evtType := r.URL.Query().Get("type") - - f := service.TransferFilter{Page: page, PageSize: pageSize} - if symbol != "" { f.TokenSymbol = &symbol } - if evtType != "" { f.EventType = &evtType } - - result, err := h.svc.GetTransferHistory(r.Context(), partyID, f) - if err != nil { - writeError(w, http.StatusInternalServerError, err) - return - } - writeJSON(w, http.StatusOK, result) -} -``` - -**How the api-server calls the indexer on behalf of an EVM client:** - -```go -// pkg/token/provider/indexer.go (new provider in api-server) -// The api-server resolves EVM → party_id, mints a short-lived JWT, calls indexer. - -func (p *IndexerProvider) GetBalance(ctx context.Context, tokenSymbol, fingerprint string) (string, error) { - // 1. Resolve fingerprint → canton_party_id via userstore (api-server's own DB) - user, err := p.userStore.GetUserByFingerprint(ctx, fingerprint) - if err != nil { - return "0", err - } - // 2. Mint a short-lived internal JWT with canton_party_id claim - jwt, err := p.jwtIssuer.IssuePartyJWT(*user.CantonPartyID) - if err != nil { - return "0", err - } - // 3. Call indexer HTTP API — indexer sees only party_id, never EVM address - return p.indexerClient.GetBalance(ctx, *user.CantonPartyID, tokenSymbol, jwt) -} -``` - ---- - -## 7. 
File & Directory Layout - -``` -canton-middleware/ -│ -├── cmd/ -│ ├── api-server/ existing -│ ├── relayer/ existing -│ └── indexer/ NEW -│ ├── main.go loads config → app/indexer.NewServer(cfg).Run() -│ └── migrate/ -│ └── main.go runs indexerdb migrations -│ -├── pkg/ -│ │ -│ ├── app/ -│ │ ├── api/ existing (api-server orchestrator) -│ │ └── indexer/ NEW (mirrors pkg/app/api/) -│ │ └── server.go wires streaming + fetcher + parser + -│ │ processor + service + HTTP server -│ │ -│ ├── cantonsdk/ -│ │ ├── bridge/ existing — unchanged -│ │ ├── token/ existing — unchanged -│ │ ├── ledger/ existing — unchanged -│ │ ├── lapi/v2/ existing — unchanged -│ │ └── streaming/ NEW — generic ledger streaming client -│ │ ├── client.go Subscribe(), runStream(), reconnect loop -│ │ └── types.go LedgerTransaction, LedgerEvent -│ │ -│ ├── indexer/ NEW — all indexer domain packages -│ │ │ -│ │ ├── fetcher/ -│ │ │ └── fetcher.go loads checkpoint → delegates to cantonsdk/streaming -│ │ │ -│ │ ├── parser/ -│ │ │ ├── parser.go routes LedgerTransaction → []ParsedEvent -│ │ │ ├── cip56.go decodeTokenTransferEvent() via cantonsdk/values -│ │ │ ├── whitelist.go ContractFilter, WhitelistFilter, AllFilter -│ │ │ └── types.go ParsedEvent, EventType (MINT/BURN/TRANSFER) -│ │ │ -│ │ ├── processor/ -│ │ │ └── processor.go atomic batch writer: events + balances + checkpoint -│ │ │ -│ │ ├── store/ -│ │ │ ├── model.go DAOs (no evm_address in TokenBalanceDao) -│ │ │ ├── store.go Store + Tx interfaces, TransferEventFilter -│ │ │ └── pg.go pgStore + pgTx (Bun ORM) -│ │ │ -│ │ ├── service/ -│ │ │ ├── service.go Service interface + impl, all methods by party_id -│ │ │ └── types.go TokenInfo, Balance, TransferEvent, TransferPage -│ │ │ -│ │ └── api/ HTTP layer (add graph/ here in Phase 2) -│ │ ├── server.go RegisterRoutes() on chi.Router -│ │ ├── handler.go listTokens, getBalance, getTransfers -│ │ ├── middleware.go authMiddleware (JWT only), scopeCheck -│ │ └── types.go JSON response types -│ │ -│ └── 
migrations/ -│ ├── apidb/ existing -│ └── indexerdb/ NEW -│ ├── migrations.go var Migrations = migrate.NewMigrations() -│ ├── 1_create_ledger_checkpoints.go -│ ├── 2_create_indexed_tokens.go -│ ├── 3_create_transfer_events.go -│ ├── 4_create_token_balances.go -│ └── 5_create_token_stats.go -│ -├── contracts/ -│ └── canton-erc20/daml/cip56-token/src/CIP56/ -│ ├── Events.daml MODIFIED — add TokenTransferEvent -│ ├── Config.daml MODIFIED — emit from IssuerMint/IssuerBurn -│ └── TransferFactory.daml MODIFIED — emit from transfer choice -│ -└── docs/ - ├── indexer-design.md this document - └── indexer-gh-issue.md GitHub issue (condensed) -``` - ---- - -## 8. Pseudo-code & Data Flows - -### Orchestrator — `pkg/app/indexer/server.go` - -```go -// pkg/app/indexer/server.go -package indexer - -type Server struct{ cfg *config.IndexerConfig } - -func NewServer(cfg *config.IndexerConfig) *Server { return &Server{cfg: cfg} } - -func (s *Server) Run() error { - ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) - defer stop() - - logger, _ := config.NewLogger(s.cfg.Logging) - defer logger.Sync() - - dbBun, err := pgutil.ConnectDB(&s.cfg.Database) - if err != nil { return err } - defer dbBun.Close() - - idxStore := indexerstore.NewStore(dbBun) - - ledgerClient, err := ledger.New(s.cfg.Canton) - if err != nil { return err } - defer ledgerClient.Close() - - streamClient := streaming.New(ledgerClient, s.cfg.Canton.IssuerParty) - - templateID := &lapiv2.Identifier{ - PackageId: s.cfg.Indexer.PackageID, - ModuleName: "CIP56", - EntityName: "TokenTransferEvent", - } - - var filter parser.ContractFilter - if len(s.cfg.Indexer.WhitelistedPackageIDs) > 0 { - filter = parser.NewWhitelistFilter(s.cfg.Indexer.WhitelistedPackageIDs) - } else { - filter = &parser.AllFilter{} - } - - txCh := make(chan *streaming.LedgerTransaction, 500) - parsedCh := make(chan []*parser.ParsedEvent, 100) - - f := fetcher.New(streamClient, idxStore, templateID, txCh) - p := 
parser.New(txCh, parsedCh, filter) - proc := processor.New(idxStore, parsedCh) - - svc := indexerservice.NewService(idxStore) // no userstore dependency - r := s.setupRouter(svc, logger) - go apphttp.ServeAndWait(ctx, r, logger, &s.cfg.Query.Server) - - g, ctx := errgroup.WithContext(ctx) - g.Go(func() error { return f.Start(ctx) }) - g.Go(func() error { return p.Start(ctx) }) - g.Go(func() error { return proc.Start(ctx) }) - - logger.Info("indexer running") - return g.Wait() -} - -func (s *Server) setupRouter(svc indexerservice.Service, logger *zap.Logger) chi.Router { - r := chi.NewRouter() - r.Use(middleware.RequestID, middleware.RealIP, middleware.Recoverer) - r.Use(middleware.Timeout(60 * time.Second)) - r.Get("/health", func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte("OK")) - }) - indexerapi.RegisterRoutes(r, svc, indexerapi.AuthConfig{ - JWKSUrl: s.cfg.Query.JWKSUrl, - }, logger) - return r -} -``` - -### Call flow: EVM client → api-server → indexer - -``` -EVM Client (MetaMask) - │ GET balance (EVM address, DEMO token) - ▼ -API Server - │ 1. Auth: VerifyEIP191Signature(evmAddress) ← pkg/auth/evm.go - │ 2. Resolve: userStore.GetUserByEVMAddress() ← userstore (api-server DB) - │ → canton_party_id - │ 3. Issue: jwtIssuer.IssuePartyJWT(canton_party_id) - │ 4. Call: indexer /v1/balance/{canton_party_id} - │ Authorization: Bearer - ▼ -Indexer - │ 5. Validate JWT → extract canton_party_id from claims - │ 6. scopeCheck: party_id matches URL param - │ 7. store.GetTokenBalance(canton_party_id, "DEMO") - │ 8. Return Balance{party_id, balance} - ▼ -API Server - │ 9. Map party_id → evm_address for client response (if needed) - ▼ -EVM Client ← {"balance": "1000000000000000000"} -``` - ---- - -## 9. 
Configuration - -```go -// pkg/config/indexer.go - -type IndexerConfig struct { - Logging LoggingConfig - Database DatabaseConfig // shared with api-server - Canton CantonConfig // same issuer credentials as api-server - Indexer IndexerOptions - Query IndexerQueryConfig -} - -type IndexerOptions struct { - WhitelistedPackageIDs []string `yaml:"whitelisted_package_ids"` - PackageID string `yaml:"package_id"` // TokenTransferEvent package - MaxReconnectBackoff time.Duration `yaml:"max_reconnect_backoff"` // default 60s -} - -type IndexerQueryConfig struct { - Server ServerConfig // host, port, timeouts - JWKSUrl string `yaml:"jwks_url"` // shared with api-server -} -``` - -```yaml -# config.indexer.yaml -logging: - level: info - format: json - -database: - host: localhost - port: 5432 - name: canton_middleware # same DB, indexer writes its own tables - user: postgres - password: ${POSTGRES_PASSWORD} - -canton: - endpoint: localhost:5011 - issuer_party: "Issuer::1220..." - auth: - type: oauth2 - client_id: ${CANTON_CLIENT_ID} - client_secret: ${CANTON_CLIENT_SECRET} - token_url: ${CANTON_TOKEN_URL} - -indexer: - package_id: "168483ce8a80e76f69f7392ceaa9ff57b1036b8fb41ccb3d410b087048195a92" - whitelisted_package_ids: - - "168483ce8a80e76f69f7392ceaa9ff57b1036b8fb41ccb3d410b087048195a92" # DEMO - - "" - max_reconnect_backoff: 60s - -query: - server: - host: 0.0.0.0 - port: 8082 - jwks_url: ${JWKS_URL} # same JWKS as api-server for JWT validation -``` - ---- - -## 10. 
Integration with API Server - -``` -Shared PostgreSQL (canton_middleware DB): - - public.* ← api-server writes - users ← EVM ↔ party_id mapping lives here, NOT in indexer - user_token_balances ← DEPRECATED after migration - bridge_events ← DEPRECATED after migration - token_metrics ← DEPRECATED after migration - - indexer.* ← indexer writes, api-server may read - ledger_checkpoints - indexed_tokens - transfer_events ← replaces bridge_events (richer, includes transfers) - token_balances ← replaces user_token_balances (keyed by party_id) - token_stats ← replaces token_metrics - -Option A (recommended Phase 1): - api-server issues a JWT → calls indexer HTTP API. - Clean separation. No shared DB reads from api-server side. - -Option B (simpler Phase 1 alternative): - api-server reads indexer.token_balances directly via SQL - (same DB, no HTTP hop needed). Requires api-server to know the indexer schema. -``` - -**Migration path:** -``` -Step 1 Deploy indexer. It builds indexer.* tables from offset 0. - Reconciler continues running in parallel. - -Step 2 Validate: compare reconciler balances vs indexer.token_balances. - Confirm TokenTransferEvent DAML upgrade is live and emitting. - -Step 3 Switch api-server token provider to call indexer API (or read indexer tables). - -Step 4 Disable reconciler. Remove after one release cycle. -``` - ---- - -## 11. Open Questions & Future Work - -| Question | Decision | -|---|---| -| DB: same instance? | Yes — same DB, indexer.* schema | -| ORM? | Bun — consistent with project | -| HTTP router? | chi — consistent with project | -| Query port? | 8082 (api=8080, relayer=8081) | -| Canton auth? | Reuse issuer OAuth2 creds from existing config | -| JWT claim name for party_id? | `canton_party_id` (custom claim) | -| api-server call mode? | Option A (HTTP) initially; can collapse to Option B (shared DB read) | -| Docker Compose? | Add `indexer` + `indexer-migrate` services | - -### Phase 2 - -1. 
**GraphQL** — add `pkg/indexer/graph/` alongside `pkg/indexer/api/` -2. **WebSocket push** — real-time event stream from processor → subscribed clients -3. **Canton Coin** — same code, different package IDs + Super Validator node -4. **Metrics** — `indexer_lag_offsets`, `events_per_second`, `batch_commit_duration_ms` -5. **Backfill** — `cmd/indexer/backfill/` to replay from offset 0 after package upgrades - ---- - -*Created: 2026-03-02* -*CIP Reference: https://github.com/canton-foundation/cips/blob/main/cip-0086/cip-0086.md*