|
| 1 | +package tapdb |
| 2 | + |
import (
	"context"
	"errors"
	"fmt"

	"github.com/lightninglabs/taproot-assets/asset"
	"github.com/lightninglabs/taproot-assets/tapdb/sqlc"
	"github.com/lightninglabs/taproot-assets/universe/supplycommit"
)
| 11 | + |
// SupplySyncerStore implements the persistent storage for supply syncing
// operations. It provides methods to store supply updates without requiring
// a supply commitment transition.
type SupplySyncerStore struct {
	// db is the batched universe tree database handle; all writes in this
	// store run inside a single transaction via db.ExecTx.
	db BatchedUniverseTree
}
| 18 | + |
| 19 | +// NewSupplySyncerStore creates a new supply syncer DB store handle. |
| 20 | +func NewSupplySyncerStore(db BatchedUniverseTree) *SupplySyncerStore { |
| 21 | + return &SupplySyncerStore{ |
| 22 | + db: db, |
| 23 | + } |
| 24 | +} |
| 25 | + |
| 26 | +// LogRemoteFetch stores a batch of supply update events fetched from a remote |
| 27 | +// universe to the database and logs the max fetch block height in the same |
| 28 | +// db transaction. |
| 29 | +func (s *SupplySyncerStore) LogRemoteFetch(ctx context.Context, |
| 30 | + spec asset.Specifier, |
| 31 | + updates []supplycommit.SupplyUpdateEvent) error { |
| 32 | + |
| 33 | + // If no updates were provided, return early without error. |
| 34 | + if len(updates) == 0 { |
| 35 | + return nil |
| 36 | + } |
| 37 | + |
| 38 | + // Find the highest block height from all the supply update events. |
| 39 | + var maxBlockHeight uint32 |
| 40 | + for _, update := range updates { |
| 41 | + if height := update.BlockHeight(); height > maxBlockHeight { |
| 42 | + maxBlockHeight = height |
| 43 | + } |
| 44 | + } |
| 45 | + |
| 46 | + // All updates must have a valid block height. |
| 47 | + if maxBlockHeight == 0 { |
| 48 | + return fmt.Errorf("all supply updates must have a valid " + |
| 49 | + "block height greater than 0") |
| 50 | + } |
| 51 | + |
| 52 | + // Extract the group key for logging. |
| 53 | + groupKey, err := spec.UnwrapGroupKeyOrErr() |
| 54 | + if err != nil { |
| 55 | + return fmt.Errorf("group key must be specified for supply "+ |
| 56 | + "syncer: %w", err) |
| 57 | + } |
| 58 | + |
| 59 | + var writeTx BaseUniverseStoreOptions |
| 60 | + return s.db.ExecTx(ctx, &writeTx, func(dbTx BaseUniverseStore) error { |
| 61 | + // Reuse the internal supply update logic which handles all |
| 62 | + // the complex subtree and root tree updates within the |
| 63 | + // transaction. |
| 64 | + _, err := applySupplyUpdatesInternal(ctx, dbTx, spec, updates) |
| 65 | + if err != nil { |
| 66 | + return err |
| 67 | + } |
| 68 | + |
| 69 | + // Log the latest synced block height for this asset group. |
| 70 | + groupKeyBytes := groupKey.SerializeCompressed() |
| 71 | + err = dbTx.UpsertSupplySyncerLog( |
| 72 | + ctx, sqlc.UpsertSupplySyncerLogParams{ |
| 73 | + GroupKey: groupKeyBytes, |
| 74 | + MaxFetchedBlockHeight: sqlInt32(maxBlockHeight), |
| 75 | + }, |
| 76 | + ) |
| 77 | + if err != nil { |
| 78 | + return fmt.Errorf("failed to log synced block "+ |
| 79 | + "height: %w", err) |
| 80 | + } |
| 81 | + |
| 82 | + return nil |
| 83 | + }) |
| 84 | +} |
| 85 | + |
| 86 | +// LogRemoteInsert logs that supply leaves have been successfully inserted |
| 87 | +// into a remote universe. |
| 88 | +func (s *SupplySyncerStore) LogRemoteInsert(ctx context.Context, |
| 89 | + spec asset.Specifier, leaves supplycommit.SupplyLeaves) error { |
| 90 | + |
| 91 | + // Calculate the total number of leaves. |
| 92 | + totalLeaves := len(leaves.IssuanceLeafEntries) + |
| 93 | + len(leaves.BurnLeafEntries) + |
| 94 | + len(leaves.IgnoreLeafEntries) |
| 95 | + |
| 96 | + // If no leaves were provided, return early without error. |
| 97 | + if totalLeaves == 0 { |
| 98 | + return nil |
| 99 | + } |
| 100 | + |
| 101 | + // Find the highest block height from all the supply leaves. |
| 102 | + var maxBlockHeight uint32 |
| 103 | + for _, leafEntry := range leaves.IssuanceLeafEntries { |
| 104 | + if height := leafEntry.BlockHeight(); height > maxBlockHeight { |
| 105 | + maxBlockHeight = height |
| 106 | + } |
| 107 | + } |
| 108 | + for _, leafEntry := range leaves.BurnLeafEntries { |
| 109 | + if height := leafEntry.BlockHeight(); height > maxBlockHeight { |
| 110 | + maxBlockHeight = height |
| 111 | + } |
| 112 | + } |
| 113 | + for _, leafEntry := range leaves.IgnoreLeafEntries { |
| 114 | + if height := leafEntry.BlockHeight(); height > maxBlockHeight { |
| 115 | + maxBlockHeight = height |
| 116 | + } |
| 117 | + } |
| 118 | + |
| 119 | + // All leaves must have a valid block height. |
| 120 | + if maxBlockHeight == 0 { |
| 121 | + return fmt.Errorf("all supply leaves must have a valid " + |
| 122 | + "block height greater than 0") |
| 123 | + } |
| 124 | + |
| 125 | + // Extract the group key for the log entry. |
| 126 | + groupKey, err := spec.UnwrapGroupKeyOrErr() |
| 127 | + if err != nil { |
| 128 | + return fmt.Errorf("group key must be specified for supply "+ |
| 129 | + "syncer log: %w", err) |
| 130 | + } |
| 131 | + |
| 132 | + var writeTx BaseUniverseStoreOptions |
| 133 | + return s.db.ExecTx(ctx, &writeTx, func(dbTx BaseUniverseStore) error { |
| 134 | + groupKeyBytes := groupKey.SerializeCompressed() |
| 135 | + params := sqlc.UpsertSupplySyncerLogParams{ |
| 136 | + GroupKey: groupKeyBytes, |
| 137 | + MaxInsertedBlockHeight: sqlInt32(maxBlockHeight), |
| 138 | + } |
| 139 | + err := dbTx.UpsertSupplySyncerLog(ctx, params) |
| 140 | + if err != nil { |
| 141 | + return fmt.Errorf("failed to log remote insert: %w", |
| 142 | + err) |
| 143 | + } |
| 144 | + |
| 145 | + return nil |
| 146 | + }) |
| 147 | +} |
0 commit comments