diff --git a/cmd/commands/universe.go b/cmd/commands/universe.go index ecbc0d956..fc74e9548 100644 --- a/cmd/commands/universe.go +++ b/cmd/commands/universe.go @@ -1638,21 +1638,6 @@ var fetchSupplyCommitCmd = cli.Command{ Usage: "the group key of the asset group to fetch", Required: true, }, - &cli.StringSliceFlag{ - Name: "issuance_leaf_keys", - Usage: "a list of issuance leaf keys to fetch " + - "inclusion proofs for", - }, - &cli.StringSliceFlag{ - Name: "burn_leaf_keys", - Usage: "a list of burn leaf keys to fetch inclusion " + - "proofs for", - }, - &cli.StringSliceFlag{ - Name: "ignore_leaf_keys", - Usage: "a list of ignore leaf keys to fetch " + - "inclusion proofs for", - }, }, Action: fetchSupplyCommit, } @@ -1668,26 +1653,6 @@ func fetchSupplyCommit(ctx *cli.Context) error { }, } - issuanceKeys, err := parseHexStrings( - ctx.StringSlice("issuance_leaf_keys"), - ) - if err != nil { - return fmt.Errorf("invalid issuance_leaf_keys: %w", err) - } - req.IssuanceLeafKeys = issuanceKeys - - burnKeys, err := parseHexStrings(ctx.StringSlice("burn_leaf_keys")) - if err != nil { - return fmt.Errorf("invalid burn_leaf_keys: %w", err) - } - req.BurnLeafKeys = burnKeys - - ignoreKeys, err := parseHexStrings(ctx.StringSlice("ignore_leaf_keys")) - if err != nil { - return fmt.Errorf("invalid ignore_leaf_keys: %w", err) - } - req.IgnoreLeafKeys = ignoreKeys - resp, err := client.FetchSupplyCommit(cliCtx, req) if err != nil { return err @@ -1719,6 +1684,21 @@ var fetchSupplyLeavesCmd = cli.Command{ Usage: "the end of the block height range", Required: true, }, + &cli.StringSliceFlag{ + Name: "issuance_leaf_keys", + Usage: "a list of issuance leaf keys to fetch " + + "inclusion proofs for", + }, + &cli.StringSliceFlag{ + Name: "burn_leaf_keys", + Usage: "a list of burn leaf keys to fetch inclusion " + + "proofs for", + }, + &cli.StringSliceFlag{ + Name: "ignore_leaf_keys", + Usage: "a list of ignore leaf keys to fetch " + + "inclusion proofs for", + }, }, Action: 
fetchSupplyLeaves, } @@ -1736,6 +1716,26 @@ func fetchSupplyLeaves(ctx *cli.Context) error { BlockHeightEnd: uint32(ctx.Uint64("block_height_end")), } + issuanceKeys, err := parseHexStrings( + ctx.StringSlice("issuance_leaf_keys"), + ) + if err != nil { + return fmt.Errorf("invalid issuance_leaf_keys: %w", err) + } + req.IssuanceLeafKeys = issuanceKeys + + burnKeys, err := parseHexStrings(ctx.StringSlice("burn_leaf_keys")) + if err != nil { + return fmt.Errorf("invalid burn_leaf_keys: %w", err) + } + req.BurnLeafKeys = burnKeys + + ignoreKeys, err := parseHexStrings(ctx.StringSlice("ignore_leaf_keys")) + if err != nil { + return fmt.Errorf("invalid ignore_leaf_keys: %w", err) + } + req.IgnoreLeafKeys = ignoreKeys + resp, err := client.FetchSupplyLeaves(cliCtx, req) if err != nil { return err diff --git a/config.go b/config.go index fb36b1862..3c063d9c0 100644 --- a/config.go +++ b/config.go @@ -19,6 +19,7 @@ import ( "github.com/lightninglabs/taproot-assets/tapgarden" "github.com/lightninglabs/taproot-assets/universe" "github.com/lightninglabs/taproot-assets/universe/supplycommit" + "github.com/lightninglabs/taproot-assets/universe/supplyverifier" "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/build" "github.com/lightningnetwork/lnd/signal" @@ -195,10 +196,15 @@ type Config struct { // SupplyCommitManager is a service that is used to manage supply // commitments for assets. Supply commitments are issuer published // attestations of the total supply of an asset. - SupplyCommitManager *supplycommit.MultiStateMachineManager + SupplyCommitManager *supplycommit.Manager IgnoreChecker *tapdb.CachingIgnoreChecker + // SupplyVerifyManager is a service that is used to verify supply + // commitments for assets. Supply commitments are issuer published + // attestations of the total supply of an asset. 
+ SupplyVerifyManager *supplyverifier.Manager + UniverseArchive *universe.Archive UniverseSyncer universe.Syncer diff --git a/docs/release-notes/release-notes-0.7.0.md b/docs/release-notes/release-notes-0.7.0.md index 0ea50fe0e..ffd16f424 100644 --- a/docs/release-notes/release-notes-0.7.0.md +++ b/docs/release-notes/release-notes-0.7.0.md @@ -58,6 +58,7 @@ - https://github.com/lightninglabs/taproot-assets/pull/1587 - https://github.com/lightninglabs/taproot-assets/pull/1716 - https://github.com/lightninglabs/taproot-assets/pull/1675 + - https://github.com/lightninglabs/taproot-assets/pull/1674 - A new [address version 2 was introduced that supports grouped assets and custom (sender-defined) diff --git a/fn/func.go b/fn/func.go index f054557ae..5feb7e3e8 100644 --- a/fn/func.go +++ b/fn/func.go @@ -93,6 +93,29 @@ func MapErr[I, O any, S []I](s S, f func(I) (O, error)) ([]O, error) { return output, nil } +// MapErrWithPtr applies the given fallible mapping function to each element of +// the given slice and generates a new slice. This is identical to MapErr, but +// can be used when the callback returns a pointer, and returns early if any +// single mapping fails. +func MapErrWithPtr[I, O any, S []I](s S, f func(I) (*O, error)) ([]O, error) { + output := make([]O, len(s)) + for i, x := range s { + outPtr, err := f(x) + if err != nil { + return nil, err + } + + if outPtr == nil { + return nil, fmt.Errorf("nil pointer returned for "+ + "item %d", i) + } + + output[i] = *outPtr + } + + return output, nil +} + // FlatMapErr applies the given mapping function to each element of the given // slice, concatenates the results into a new slice, and returns an error if // the mapping function fails. diff --git a/itest/assertions.go b/itest/assertions.go index a2038378e..2e746748b 100644 --- a/itest/assertions.go +++ b/itest/assertions.go @@ -2786,8 +2786,9 @@ func UpdateAndMineSupplyCommit(t *testing.T, ctx context.Context, // it when the specified condition is met. 
func WaitForSupplyCommit(t *testing.T, ctx context.Context, tapd unirpc.UniverseClient, groupKeyBytes []byte, + spentCommitOutpoint fn.Option[wire.OutPoint], condition func(*unirpc.FetchSupplyCommitResponse) bool, -) *unirpc.FetchSupplyCommitResponse { +) (*unirpc.FetchSupplyCommitResponse, wire.OutPoint) { groupKeyReq := &unirpc.FetchSupplyCommitRequest_GroupKeyBytes{ GroupKeyBytes: groupKeyBytes, @@ -2796,12 +2797,30 @@ func WaitForSupplyCommit(t *testing.T, ctx context.Context, var fetchResp *unirpc.FetchSupplyCommitResponse var err error - require.Eventually(t, func() bool { - fetchResp, err = tapd.FetchSupplyCommit( - ctx, &unirpc.FetchSupplyCommitRequest{ - GroupKey: groupKeyReq, + // By default, we start the fetch from the very first commitment. + // If a spent outpoint is given, we start from there. + req := &unirpc.FetchSupplyCommitRequest{ + GroupKey: groupKeyReq, + Locator: &unirpc.FetchSupplyCommitRequest_VeryFirst{ + VeryFirst: true, + }, + } + + // nolint: lll + spentCommitOutpoint.WhenSome(func(outPoint wire.OutPoint) { + req = &unirpc.FetchSupplyCommitRequest{ + GroupKey: groupKeyReq, + Locator: &unirpc.FetchSupplyCommitRequest_SpentCommitOutpoint{ + SpentCommitOutpoint: &taprpc.OutPoint{ + Txid: outPoint.Hash[:], + OutputIndex: outPoint.Index, + }, }, - ) + } + }) + + require.Eventually(t, func() bool { + fetchResp, err = tapd.FetchSupplyCommit(ctx, req) if err != nil { return false } @@ -2809,5 +2828,19 @@ func WaitForSupplyCommit(t *testing.T, ctx context.Context, return fetchResp != nil && condition(fetchResp) }, defaultWaitTimeout, time.Second) - return fetchResp + // Return the supply commit outpoint used to fetch the next supply + // commitment. The next commitment is retrieved by referencing the + // outpoint of the previously spent commitment. 
+ require.NotNil(t, fetchResp) + + var msgTx wire.MsgTx + err = msgTx.Deserialize(bytes.NewReader(fetchResp.ChainData.Txn)) + require.NoError(t, err) + + supplyCommitOutpoint := wire.OutPoint{ + Hash: msgTx.TxHash(), + Index: fetchResp.ChainData.TxOutIdx, + } + + return fetchResp, supplyCommitOutpoint } diff --git a/itest/supply_commit_mint_burn_test.go b/itest/supply_commit_mint_burn_test.go index 13319c4ee..0b9bc4c11 100644 --- a/itest/supply_commit_mint_burn_test.go +++ b/itest/supply_commit_mint_burn_test.go @@ -6,6 +6,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" taprootassets "github.com/lightninglabs/taproot-assets" "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/taprpc" @@ -48,18 +49,19 @@ func testSupplyCommitMintBurn(t *harnessTest) { // Update the on-chain supply commitment for the asset group. // // TODO(roasbeef): still rely on the time based ticker here? - t.Log("Updating and mining supply commitment for asset group") + t.Log("Create first supply commitment tx for asset group") UpdateAndMineSupplyCommit( t.t, ctxb, t.tapd, t.lndHarness.Miner().Client, groupKeyBytes, 1, ) // Fetch the latest supply commitment for the asset group. - t.Log("Fetching supply commitment to verify mint leaves") - fetchResp := WaitForSupplyCommit( - t.t, ctxb, t.tapd, groupKeyBytes, + t.Log("Fetching first supply commitment to verify mint leaves") + fetchResp, supplyOutpoint := WaitForSupplyCommit( + t.t, ctxb, t.tapd, groupKeyBytes, fn.None[wire.OutPoint](), func(resp *unirpc.FetchSupplyCommitResponse) bool { - return resp.BlockHeight > 0 && len(resp.BlockHash) > 0 + return resp.ChainData.BlockHeight > 0 && + len(resp.ChainData.BlockHash) > 0 }, ) @@ -72,7 +74,7 @@ func testSupplyCommitMintBurn(t *harnessTest) { // Verify the issuance leaf inclusion in the supply tree. 
AssertSubtreeInclusionProof( - t, fetchResp.SupplyCommitmentRoot.RootHash, + t, fetchResp.ChainData.SupplyRootHash, fetchResp.IssuanceSubtreeRoot, ) @@ -106,8 +108,6 @@ func testSupplyCommitMintBurn(t *harnessTest) { ) t.Log("Updating supply commitment after second mint") - - // Update and mine the supply commitment after second mint. UpdateAndMineSupplyCommit( t.t, ctxb, t.tapd, t.lndHarness.Miner().Client, groupKeyBytes, 1, @@ -119,8 +119,8 @@ func testSupplyCommitMintBurn(t *harnessTest) { expectedTotal := int64( mintReq.Asset.Amount + secondMintReq.Asset.Amount, ) - fetchResp = WaitForSupplyCommit( - t.t, ctxb, t.tapd, groupKeyBytes, + fetchResp, supplyOutpoint = WaitForSupplyCommit( + t.t, ctxb, t.tapd, groupKeyBytes, fn.Some(supplyOutpoint), func(resp *unirpc.FetchSupplyCommitResponse) bool { return resp.IssuanceSubtreeRoot != nil && resp.IssuanceSubtreeRoot.RootNode.RootSum == expectedTotal //nolint:lll @@ -175,7 +175,8 @@ func testSupplyCommitMintBurn(t *harnessTest) { t.Log("Verifying supply tree includes burn leaves") // Fetch and verify the supply tree now includes burn leaves. - fetchResp = WaitForSupplyCommit(t.t, ctxb, t.tapd, groupKeyBytes, + fetchResp, _ = WaitForSupplyCommit( + t.t, ctxb, t.tapd, groupKeyBytes, fn.Some(supplyOutpoint), func(resp *unirpc.FetchSupplyCommitResponse) bool { return resp.BurnSubtreeRoot != nil && resp.BurnSubtreeRoot.RootNode.RootSum == int64(burnAmt) //nolint:lll @@ -184,7 +185,7 @@ func testSupplyCommitMintBurn(t *harnessTest) { // Verify the burn subtree inclusion in the supply tree. 
AssertSubtreeInclusionProof( - t, fetchResp.SupplyCommitmentRoot.RootHash, + t, fetchResp.ChainData.SupplyRootHash, fetchResp.BurnSubtreeRoot, ) @@ -234,16 +235,16 @@ func testSupplyCommitMintBurn(t *harnessTest) { block := finalMinedBlocks[0] blockHash, _ := t.lndHarness.Miner().GetBestBlock() - fetchBlockHash, err := chainhash.NewHash(fetchResp.BlockHash) + fetchBlockHash, err := chainhash.NewHash(fetchResp.ChainData.BlockHash) require.NoError(t.t, err) require.True(t.t, fetchBlockHash.IsEqual(blockHash)) // Re-compute the supply commitment root hash from the latest fetch, // then use that to derive the expected commitment output. supplyCommitRootHash := fn.ToArray[[32]byte]( - fetchResp.SupplyCommitmentRoot.RootHash, + fetchResp.ChainData.SupplyRootHash, ) - internalKey, err := btcec.ParsePubKey(fetchResp.AnchorTxOutInternalKey) + internalKey, err := btcec.ParsePubKey(fetchResp.ChainData.InternalKey) require.NoError(t.t, err) expectedTxOut, _, err := supplycommit.RootCommitTxOut( internalKey, nil, supplyCommitRootHash, diff --git a/itest/supply_commit_test.go b/itest/supply_commit_test.go index 1e3f11777..1eaf1429d 100644 --- a/itest/supply_commit_test.go +++ b/itest/supply_commit_test.go @@ -206,18 +206,19 @@ func testSupplyCommitIgnoreAsset(t *harnessTest) { _, newIgnoreBlockHeight := t.lndHarness.Miner().GetBestBlock() // Ignore the asset outpoint owned by the secondary node. 
+ ignoreAmt := sendAssetAmount ignoreReq := &unirpc.IgnoreAssetOutPointRequest{ AssetOutPoint: &taprpc.AssetOutPoint{ AnchorOutPoint: transferOutput.Anchor.Outpoint, AssetId: rpcAsset.AssetGenesis.AssetId, ScriptKey: transferOutput.ScriptKey, }, - Amount: sendAssetAmount, + Amount: ignoreAmt, } respIgnore, err := t.tapd.IgnoreAssetOutPoint(ctxb, ignoreReq) require.NoError(t.t, err) require.NotNil(t.t, respIgnore) - require.EqualValues(t.t, sendAssetAmount, respIgnore.Leaf.RootSum) + require.EqualValues(t.t, ignoreAmt, respIgnore.Leaf.RootSum) // We also ignore our change output, so we can later verify that the // proof verifier correctly denies spending the change output. @@ -247,15 +248,13 @@ func testSupplyCommitIgnoreAsset(t *harnessTest) { GroupKey: &unirpc.FetchSupplyCommitRequest_GroupKeyBytes{ GroupKeyBytes: groupKeyBytes, }, - IgnoreLeafKeys: [][]byte{ - respIgnore.LeafKey, - respIgnore2.LeafKey, + Locator: &unirpc.FetchSupplyCommitRequest_VeryFirst{ + VeryFirst: true, }, }, ) require.Nil(t.t, fetchRespNil) - require.ErrorContains(t.t, err, "supply commitment not found for "+ - "asset group with key") + require.ErrorContains(t.t, err, "commitment not found") t.Log("Update on-chain supply commitment for asset group") @@ -285,9 +284,8 @@ func testSupplyCommitIgnoreAsset(t *harnessTest) { GroupKey: &unirpc.FetchSupplyCommitRequest_GroupKeyBytes{ GroupKeyBytes: groupKeyBytes, }, - IgnoreLeafKeys: [][]byte{ - respIgnore.LeafKey, - respIgnore2.LeafKey, + Locator: &unirpc.FetchSupplyCommitRequest_VeryFirst{ + VeryFirst: true, }, }, ) @@ -296,19 +294,25 @@ func testSupplyCommitIgnoreAsset(t *harnessTest) { // If the fetch response has no block height or hash, // it means that the supply commitment transaction has not // been mined yet, so we should retry. 
- if fetchResp.BlockHeight == 0 || len(fetchResp.BlockHash) == 0 { + if fetchResp.ChainData.BlockHeight == 0 || + len(fetchResp.ChainData.BlockHash) == 0 { + return false } // Once the ignore tree includes the ignored asset outpoint, we // know that the supply commitment has been updated. + if fetchResp.IgnoreSubtreeRoot == nil { + return false + } + return fetchResp.IgnoreSubtreeRoot.RootNode.RootSum == int64(sendAssetAmount+sendChangeAmount) }, defaultWaitTimeout, time.Second) // Verify that the supply commitment tree commits to the ignore subtree. supplyCommitRootHash := fn.ToArray[[32]byte]( - fetchResp.SupplyCommitmentRoot.RootHash, + fetchResp.ChainData.SupplyRootHash, ) // Formulate the ignore leaf node as it should appear in the supply @@ -328,10 +332,27 @@ func testSupplyCommitIgnoreAsset(t *harnessTest) { ignoreRootLeafKey, supplyTreeIgnoreLeafNode, ) + // Now fetch the inclusion proofs using FetchSupplyLeaves instead of + // FetchSupplyCommit. + t.Log("Fetch supply leaves with inclusion proofs") + // nolint: lll + fetchLeavesResp, err := t.tapd.FetchSupplyLeaves( + ctxb, &unirpc.FetchSupplyLeavesRequest{ + GroupKey: &unirpc.FetchSupplyLeavesRequest_GroupKeyBytes{ + GroupKeyBytes: groupKeyBytes, + }, + IgnoreLeafKeys: [][]byte{ + respIgnore.LeafKey, + respIgnore2.LeafKey, + }, + }, + ) + require.NoError(t.t, err) + // Unmarshal ignore tree leaf inclusion proof to verify that the // ignored asset outpoint is included in the ignore tree. - require.Len(t.t, fetchResp.IgnoreLeafInclusionProofs, 2) - inclusionProofBytes := fetchResp.IgnoreLeafInclusionProofs[0] + require.Len(t.t, fetchLeavesResp.IgnoreLeafInclusionProofs, 2) + inclusionProofBytes := fetchLeavesResp.IgnoreLeafInclusionProofs[0] // Verify that the ignore tree root can be computed from the ignore leaf // inclusion proof. @@ -360,18 +381,18 @@ func testSupplyCommitIgnoreAsset(t *harnessTest) { // Ensure that the block hash and height matches the values in the fetch // response. 
- fetchBlockHash, err := chainhash.NewHash(fetchResp.BlockHash) + fetchBlockHash, err := chainhash.NewHash(fetchResp.ChainData.BlockHash) require.NoError(t.t, err) require.True(t.t, fetchBlockHash.IsEqual(blockHash)) - require.EqualValues(t.t, blockHeight, fetchResp.BlockHeight) + require.EqualValues(t.t, blockHeight, fetchResp.ChainData.BlockHeight) // We expect two transactions in the block: // 1. The supply commitment transaction. // 2. The coinbase transaction. require.Len(t.t, block.Transactions, 2) - internalKey, err := btcec.ParsePubKey(fetchResp.AnchorTxOutInternalKey) + internalKey, err := btcec.ParsePubKey(fetchResp.ChainData.InternalKey) require.NoError(t.t, err) expectedTxOut, _, err := supplycommit.RootCommitTxOut( @@ -406,7 +427,9 @@ func testSupplyCommitIgnoreAsset(t *harnessTest) { } require.True(t.t, foundCommitTxOut) - require.EqualValues(t.t, actualBlockTxIndex, fetchResp.BlockTxIndex) + require.EqualValues( + t.t, actualBlockTxIndex, fetchResp.ChainData.TxIndex, + ) // If we try to ignore the same asset outpoint using the secondary // node, it should fail because the secondary node does not have access @@ -481,14 +504,88 @@ func testSupplyCommitIgnoreAsset(t *harnessTest) { withError("is ignored"), ) - // TODO(ffranr): The above only tests that the node that issued the - // ignore request has it in its ignore tree and can then deny spending - // it. What we should also test is that the secondary node can sync the - // ignore tree and then also deny spending the ignored asset outpoint - // they received from the primary node. - // Another test case we should add is that a node that _does not_ sync - // the ignore tree can _send_ an ignored asset, but a synced node will - // deny accepting it (transfer will never complete). + t.Log("Fetch first supply commitment from universe server") + + // Ensure that the supply commitment was pushed to the universe server + // and that it is retrievable. 
+ var uniFetchResp *unirpc.FetchSupplyCommitResponse + require.Eventually(t.t, func() bool { + // nolint: lll + uniFetchResp, err = t.universeServer.service.FetchSupplyCommit( + ctxb, &unirpc.FetchSupplyCommitRequest{ + GroupKey: &unirpc.FetchSupplyCommitRequest_GroupKeyBytes{ + GroupKeyBytes: groupKeyBytes, + }, + Locator: &unirpc.FetchSupplyCommitRequest_VeryFirst{ + VeryFirst: true, + }, + }, + ) + require.NoError(t.t, err) + + // If the fetch response does not include a block height, the + // supply commitment transaction has not been mined yet, so we + // should retry. + if uniFetchResp.ChainData.BlockHeight == 0 { + return false + } + + return true + }, defaultWaitTimeout, time.Second) + + // Assert universe supply commitment fetch response. + require.Len(t.t, uniFetchResp.IssuanceLeaves, 1) + require.Len(t.t, uniFetchResp.BurnLeaves, 0) + require.Len(t.t, uniFetchResp.IgnoreLeaves, 2) + + // Assert issuance leaf properties. + issuanceLeaf := uniFetchResp.IssuanceLeaves[0] + require.EqualValues( + t.t, rpcAsset.Amount, issuanceLeaf.LeafNode.RootSum, + ) + + // Assert ignored leaf properties. + // + // Determine which ignore leaf was the first one we added, so we + // can assert its properties. + firstIgnoreLeaf := uniFetchResp.IgnoreLeaves[0] + secondIgnoreLeaf := uniFetchResp.IgnoreLeaves[1] + if firstIgnoreLeaf.LeafNode.RootSum != int64(ignoreAmt) { + firstIgnoreLeaf, secondIgnoreLeaf = secondIgnoreLeaf, + firstIgnoreLeaf + } + + require.EqualValues(t.t, ignoreAmt, firstIgnoreLeaf.LeafNode.RootSum) + require.EqualValues( + t.t, rpcAsset.Amount-sendAssetAmount, + uint32(secondIgnoreLeaf.LeafNode.RootSum), + ) + + // Assert supply subtree root properties. + require.NotNil(t.t, uniFetchResp.IssuanceSubtreeRoot) + require.NotNil(t.t, uniFetchResp.BurnSubtreeRoot) + require.NotNil(t.t, uniFetchResp.IgnoreSubtreeRoot) + + // Assert that the issuance subtree root sum matches the total + // amount of issued assets. 
+ require.EqualValues( + t, rpcAsset.Amount, + uniFetchResp.IssuanceSubtreeRoot.RootNode.RootSum, + ) + + // Assert that the burn subtree root sum is zero, as no assets have + // been burned. + require.EqualValues( + t, 0, + uniFetchResp.BurnSubtreeRoot.RootNode.RootSum, + ) + + // Assert that the ignore subtree root sum equals the total issued + // amount, since the entire issuance has been recorded as ignored. + require.EqualValues( + t, rpcAsset.Amount, + uniFetchResp.IgnoreSubtreeRoot.RootNode.RootSum, + ) } // AssertInclusionProof checks that the inclusion proof for a given leaf key diff --git a/log.go b/log.go index 8a8ced315..06949d40b 100644 --- a/log.go +++ b/log.go @@ -16,6 +16,7 @@ import ( "github.com/lightninglabs/taproot-assets/tapsend" "github.com/lightninglabs/taproot-assets/universe" "github.com/lightninglabs/taproot-assets/universe/supplycommit" + "github.com/lightninglabs/taproot-assets/universe/supplyverifier" "github.com/lightningnetwork/lnd/build" "github.com/lightningnetwork/lnd/signal" ) @@ -115,6 +116,10 @@ func SetupLoggers(root *build.SubLoggerManager, root, supplycommit.Subsystem, interceptor, supplycommit.UseLogger, ) + AddSubLogger( + root, supplyverifier.Subsystem, interceptor, + supplyverifier.UseLogger, + ) AddSubLogger( root, commitment.Subsystem, interceptor, commitment.UseLogger, ) diff --git a/mssmt/compacted_tree.go b/mssmt/compacted_tree.go index 84246e502..f8b6383e0 100644 --- a/mssmt/compacted_tree.go +++ b/mssmt/compacted_tree.go @@ -521,6 +521,68 @@ func (t *CompactedTree) Copy(ctx context.Context, targetTree Tree) error { return nil } +// CopyFilter copies all the key-value pairs from the source tree into the +// target tree that pass the filter callback. The filter callback is invoked for +// each leaf-key pair. 
+func (t *CompactedTree) CopyFilter(ctx context.Context, targetTree Tree, + filterFunc CopyFilterPredicate) error { + + var leaves map[[hashSize]byte]*LeafNode + err := t.store.View(ctx, func(tx TreeStoreViewTx) error { + root, err := tx.RootNode() + if err != nil { + return fmt.Errorf("error getting root node: %w", err) + } + + // Optimization: If the source tree is empty, there's nothing to + // copy. + if IsEqualNode(root, EmptyTree[0]) { + leaves = make(map[[hashSize]byte]*LeafNode) + return nil + } + + // Start recursive collection from the root at depth 0. + leaves, err = collectLeavesRecursive(ctx, tx, root, 0) + if err != nil { + return fmt.Errorf("error collecting leaves: %w", err) + } + + return nil + }) + if err != nil { + return err + } + + // Pass the leaves through the filter callback. + if filterFunc != nil { + var filteredLeaves = make(map[[hashSize]byte]*LeafNode) + + for leafKey, leafNode := range leaves { + include, err := filterFunc(leafKey, *leafNode) + if err != nil { + return fmt.Errorf("filter function for key "+ + "%x: %w", leafKey, err) + } + + if include { + filteredLeaves[leafKey] = leafNode + } + } + + leaves = filteredLeaves + } + + // Insert all found leaves into the target tree using InsertMany for + // efficiency. + _, err = targetTree.InsertMany(ctx, leaves) + if err != nil { + return fmt.Errorf("error inserting leaves into "+ + "target tree: %w", err) + } + + return nil +} + // InsertMany inserts multiple leaf nodes provided in the leaves map within a // single database transaction. func (t *CompactedTree) InsertMany(ctx context.Context, diff --git a/mssmt/interface.go b/mssmt/interface.go index bf3759f0c..4756dee45 100644 --- a/mssmt/interface.go +++ b/mssmt/interface.go @@ -2,6 +2,12 @@ package mssmt import "context" +// CopyFilterPredicate is a type alias for a filter function used in CopyFilter. +// It takes a key and leaf node as input and returns a boolean indicating +// whether to include the leaf in the copy operation. 
A true value means the +// leaf should be included, while false means it should be excluded. +type CopyFilterPredicate = func([hashSize]byte, LeafNode) (bool, error) + // Tree is an interface defining an abstract MSSMT tree type. type Tree interface { // Root returns the root node of the MS-SMT. @@ -39,4 +45,10 @@ type Tree interface { // Copy copies all the key-value pairs from the source tree into the // target tree. Copy(ctx context.Context, targetTree Tree) error + + // CopyFilter copies all the key-value pairs from the source tree into + // the target tree that pass the filter callback. The filter callback is + // invoked for each leaf-key pair. + CopyFilter(ctx context.Context, targetTree Tree, + filterFunc CopyFilterPredicate) error } diff --git a/mssmt/tree.go b/mssmt/tree.go index 60e812184..8d244c1d6 100644 --- a/mssmt/tree.go +++ b/mssmt/tree.go @@ -440,6 +440,67 @@ func (t *FullTree) Copy(ctx context.Context, targetTree Tree) error { return nil } +// CopyFilter copies all the key-value pairs from the source tree into the +// target tree that pass the filter callback. The filter callback is invoked for +// each leaf-key pair. +func (t *FullTree) CopyFilter(ctx context.Context, targetTree Tree, + filterFunc CopyFilterPredicate) error { + + var leaves map[[hashSize]byte]*LeafNode + err := t.store.View(ctx, func(tx TreeStoreViewTx) error { + root, err := tx.RootNode() + if err != nil { + return fmt.Errorf("error getting root node: %w", err) + } + + // Optimization: If the source tree is empty, there's nothing + // to copy. + if IsEqualNode(root, EmptyTree[0]) { + leaves = make(map[[hashSize]byte]*LeafNode) + return nil + } + + leaves, err = findLeaves(ctx, tx, root, [hashSize]byte{}, 0) + if err != nil { + return fmt.Errorf("error finding leaves: %w", err) + } + + return nil + }) + if err != nil { + return err + } + + // Pass the leaves through the filter callback. 
+ if filterFunc != nil { + var filteredLeaves = make(map[[hashSize]byte]*LeafNode) + + for leafKey, leafNode := range leaves { + include, err := filterFunc(leafKey, *leafNode) + if err != nil { + return fmt.Errorf("filter function for key "+ + "%x: %w", leafKey, err) + } + + if include { + filteredLeaves[leafKey] = leafNode + } + } + + leaves = filteredLeaves + } + + // Insert all found leaves into the target tree using InsertMany for + // efficiency. + _, err = targetTree.InsertMany(ctx, leaves) + if err != nil { + return fmt.Errorf("error inserting leaves into target "+ + "tree: %w", err) + } + + return nil +} + // InsertMany inserts multiple leaf nodes provided in the leaves map within a // single database transaction. func (t *FullTree) InsertMany(ctx context.Context, diff --git a/mssmt/tree_test.go b/mssmt/tree_test.go index 13c7f8791..12b14504d 100644 --- a/mssmt/tree_test.go +++ b/mssmt/tree_test.go @@ -978,6 +978,314 @@ func TestTreeCopy(t *testing.T) { } } +// testFilterScenario tests a single filter scenario for the CopyFilter method. +func testFilterScenario( + t *testing.T, ctx context.Context, sourceTree mssmt.Tree, + targetTree mssmt.Tree, filterFunc mssmt.CopyFilterPredicate, + expectCount int, description string, leaves []treeLeaf, + initialTargetLeavesMap map[[hashSize]byte]*mssmt.LeafNode) { + + // Pre-populate the target tree. + _, err := targetTree.InsertMany(ctx, initialTargetLeavesMap) + require.NoError(t, err) + + // Perform the filtered copy. + err = sourceTree.CopyFilter(ctx, targetTree, filterFunc) + require.NoError(t, err) + + // Calculate expected leaves based on the filter. + expectedLeaves := make(map[[hashSize]byte]*mssmt.LeafNode) + + // Start with initial target leaves. + for key, leaf := range initialTargetLeavesMap { + expectedLeaves[key] = leaf + } + + // Apply filter to source leaves. + if filterFunc == nil { + // Nil filter means include all. 
+ for _, item := range leaves { + expectedLeaves[item.key] = item.leaf + } + } else { + for _, item := range leaves { + include, err := filterFunc(item.key, *item.leaf) + require.NoError(t, err) + if include { + expectedLeaves[item.key] = item.leaf + } + } + } + + // Verify the expected count. + actualFilteredCount := len(expectedLeaves) - len(initialTargetLeavesMap) + require.Equal(t, expectCount, actualFilteredCount, + "filtered leaf count mismatch for %s", description, + ) + + // Create expected state tree for root comparison. + expectedStateStore := mssmt.NewDefaultStore() + expectedStateTree := mssmt.NewFullTree(expectedStateStore) + + _, err = expectedStateTree.InsertMany(ctx, expectedLeaves) + require.NoError(t, err) + + expectedRoot, err := expectedStateTree.Root(ctx) + require.NoError(t, err) + + // Verify the target tree root matches the expected root. + targetRoot, err := targetTree.Root(ctx) + require.NoError(t, err) + require.True( + t, mssmt.IsEqualNode(expectedRoot, targetRoot), + "root mismatch after filtered copy with %s", description, + ) + + // Verify individual leaves in the target tree. + for key, expectedLeaf := range expectedLeaves { + targetLeaf, err := targetTree.Get(ctx, key) + require.NoError(t, err) + require.Equal(t, expectedLeaf, targetLeaf, + "leaf mismatch for key %x with %s", key, description, + ) + } + + // Verify that filtered-out leaves are not present. + if filterFunc == nil { + return + } + + for _, item := range leaves { + include, err := filterFunc(item.key, *item.leaf) + require.NoError(t, err) + if include { + continue + } + + // This leaf should not be in target tree unless it was in + // initial target leaves. 
+ _, wasInitial := initialTargetLeavesMap[item.key] + if wasInitial { + continue + } + + targetLeaf, err := targetTree.Get(ctx, item.key) + require.NoError(t, err) + require.True( + t, targetLeaf.IsEmpty(), + "filtered-out leaf %x should not be present", item.key, + ) + } +} + +// TestTreeCopyFilter tests the CopyFilter method with various filter scenarios. +func TestTreeCopyFilter(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + // Prepare source trees (Full and Compacted). + sourceFullStore := mssmt.NewDefaultStore() + sourceFullTree := mssmt.NewFullTree(sourceFullStore) + + sourceCompactedStore := mssmt.NewDefaultStore() + sourceCompactedTree := mssmt.NewCompactedTree(sourceCompactedStore) + + leaves := randTree(20) + for _, item := range leaves { + _, err := sourceFullTree.Insert(ctx, item.key, item.leaf) + require.NoError(t, err) + + _, err = sourceCompactedTree.Insert(ctx, item.key, item.leaf) + require.NoError(t, err) + } + + sourceFullRoot, err := sourceFullTree.Root(ctx) + require.NoError(t, err) + + sourceCompactedRoot, err := sourceCompactedTree.Root(ctx) + require.NoError(t, err) + + require.True(t, mssmt.IsEqualNode(sourceFullRoot, sourceCompactedRoot)) + + // Define some leaves to pre-populate the target tree. + initialTargetLeaves := []treeLeaf{ + {key: test.RandHash(), leaf: randLeaf()}, + {key: test.RandHash(), leaf: randLeaf()}, + } + initialTargetLeavesMap := make(map[[hashSize]byte]*mssmt.LeafNode) + for _, item := range initialTargetLeaves { + initialTargetLeavesMap[item.key] = item.leaf + } + + // Get first 5 keys for deterministic filtering. + var excludeKeys [][hashSize]byte + for i, item := range leaves { + if i < 5 { + excludeKeys = append(excludeKeys, item.key) + continue + } + + break + } + + // Define filter scenarios. 
+ // + // nolint: lll + filterScenarios := []struct { + name string + filterFunc mssmt.CopyFilterPredicate + expectCount int + description string + }{ + { + name: "include_all", + filterFunc: func(key [hashSize]byte, leaf mssmt.LeafNode) (bool, error) { + return true, nil + }, + expectCount: len(leaves), + description: "filter that includes all leaves.", + }, + { + name: "exclude_all", + filterFunc: func(key [hashSize]byte, leaf mssmt.LeafNode) (bool, error) { + return false, nil + }, + expectCount: 0, + description: "filter that excludes all leaves.", + }, + { + name: "exclude_five", + filterFunc: func(key [hashSize]byte, leaf mssmt.LeafNode) (bool, error) { + // Exclude exactly 5 specific leaves. + for _, excludeKey := range excludeKeys { + if key == excludeKey { + return false, nil + } + } + return true, nil + }, + expectCount: len(leaves) - 5, + description: "filter that excludes exactly 5 " + + "specific leaves.", + }, + { + name: "nil_filter", + filterFunc: nil, + expectCount: len(leaves), + description: "nil filter should include all leaves.", + }, + } + + // Define test cases for different tree type combinations. 
+ testCases := []struct { + name string + sourceTree mssmt.Tree + makeTarget func() mssmt.Tree + }{ + { + name: "Full -> Full", + sourceTree: sourceFullTree, + makeTarget: func() mssmt.Tree { + return mssmt.NewFullTree( + mssmt.NewDefaultStore(), + ) + }, + }, + { + name: "Full -> Compacted", + sourceTree: sourceFullTree, + makeTarget: func() mssmt.Tree { + return mssmt.NewCompactedTree( + mssmt.NewDefaultStore(), + ) + }, + }, + { + name: "Compacted -> Full", + sourceTree: sourceCompactedTree, + makeTarget: func() mssmt.Tree { + return mssmt.NewFullTree( + mssmt.NewDefaultStore(), + ) + }, + }, + { + name: "Compacted -> Compacted", + sourceTree: sourceCompactedTree, + makeTarget: func() mssmt.Tree { + return mssmt.NewCompactedTree( + mssmt.NewDefaultStore(), + ) + }, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + for _, fs := range filterScenarios { + fs := fs + + t.Run(fs.name, func(t *testing.T) { + t.Parallel() + + testFilterScenario( + t, ctx, tc.sourceTree, + tc.makeTarget(), + fs.filterFunc, fs.expectCount, + fs.description, leaves, + initialTargetLeavesMap, + ) + }) + } + }) + } +} + +// TestTreeCopyFilterError tests error handling in CopyFilter method. +func TestTreeCopyFilterError(t *testing.T) { + t.Parallel() + + leaves := randTree(10) + ctx := context.Background() + + // Prepare source tree. + sourceStore := mssmt.NewDefaultStore() + sourceTree := mssmt.NewFullTree(sourceStore) + + for _, item := range leaves { + _, err := sourceTree.Insert(ctx, item.key, item.leaf) + require.NoError(t, err) + } + + // Prepare target tree. + targetStore := mssmt.NewDefaultStore() + targetTree := mssmt.NewFullTree(targetStore) + + // Test filter function that returns an error. + errorFilter := func(key [hashSize]byte, leaf mssmt.LeafNode) (bool, + error) { + + // Return error for the first key we encounter. 
+ return false, fmt.Errorf("test filter error for key %x", key) + } + + err := sourceTree.CopyFilter(ctx, targetTree, errorFilter) + require.Error(t, err) + require.Contains(t, err.Error(), "filter function for key") + require.Contains(t, err.Error(), "test filter error") + + // Verify target tree remains unchanged after error. + targetRoot, err := targetTree.Root(ctx) + require.NoError(t, err) + require.True(t, mssmt.IsEqualNode(targetRoot, mssmt.EmptyTree[0]), + "target tree should remain empty after filter error") +} + // TestInsertMany tests inserting multiple leaves using the InsertMany method. func TestInsertMany(t *testing.T) { t.Parallel() diff --git a/proof/util.go b/proof/util.go index fdf60b6b9..b7c6cc5fb 100644 --- a/proof/util.go +++ b/proof/util.go @@ -45,9 +45,9 @@ func unpackBits(bytes []byte) []bool { return bits } -// txSpendsPrevOut returns whether the given prevout is spent by the given +// TxSpendsPrevOut returns whether the given prevout is spent by the given // transaction. -func txSpendsPrevOut(tx *wire.MsgTx, prevOut *wire.OutPoint) bool { +func TxSpendsPrevOut(tx *wire.MsgTx, prevOut *wire.OutPoint) bool { for _, txIn := range tx.TxIn { if txIn.PreviousOutPoint == *prevOut { return true diff --git a/proof/verifier.go b/proof/verifier.go index 78bae3706..c83cfce2f 100644 --- a/proof/verifier.go +++ b/proof/verifier.go @@ -946,7 +946,7 @@ func (p *Proof) VerifyProofIntegrity(ctx context.Context, vCtx VerifierCtx, // 1. A transaction that spends the previous asset output has a valid // merkle proof within a block in the chain. 
- if !txSpendsPrevOut(&p.AnchorTx, &p.PrevOut) { + if !TxSpendsPrevOut(&p.AnchorTx, &p.PrevOut) { return nil, fmt.Errorf("%w: doesn't spend prev output", commitment.ErrInvalidTaprootProof) } diff --git a/rpcserver.go b/rpcserver.go index 8b62c5aeb..0b5f2b07b 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -57,6 +57,7 @@ import ( "github.com/lightninglabs/taproot-assets/tapsend" "github.com/lightninglabs/taproot-assets/universe" "github.com/lightninglabs/taproot-assets/universe/supplycommit" + "github.com/lightninglabs/taproot-assets/universe/supplyverifier" "github.com/lightningnetwork/lnd/build" lfn "github.com/lightningnetwork/lnd/fn/v2" "github.com/lightningnetwork/lnd/keychain" @@ -4094,33 +4095,11 @@ func (r *rpcServer) UpdateSupplyCommit(ctx context.Context, req *unirpc.UpdateSupplyCommitRequest) ( *unirpc.UpdateSupplyCommitResponse, error) { - // Parse asset group key from the request. - var groupPubKey btcec.PublicKey - - switch { - case len(req.GetGroupKeyBytes()) > 0: - gk, err := btcec.ParsePubKey(req.GetGroupKeyBytes()) - if err != nil { - return nil, fmt.Errorf("parsing group key: %w", err) - } - - groupPubKey = *gk - - case len(req.GetGroupKeyStr()) > 0: - groupKeyBytes, err := hex.DecodeString(req.GetGroupKeyStr()) - if err != nil { - return nil, fmt.Errorf("decoding group key: %w", err) - } - - gk, err := btcec.ParsePubKey(groupKeyBytes) - if err != nil { - return nil, fmt.Errorf("parsing group key: %w", err) - } - - groupPubKey = *gk - - default: - return nil, fmt.Errorf("group key unspecified") + groupPubKey, err := unmarshalGroupKey( + req.GetGroupKeyBytes(), req.GetGroupKeyStr(), + ) + if err != nil { + return nil, fmt.Errorf("failed to parse group key: %w", err) } // We will now check to ensure that universe commitments are enabled for @@ -4128,7 +4107,7 @@ func (r *rpcServer) UpdateSupplyCommit(ctx context.Context, // // Look up the asset group by the group key. 
assetGroup, err := r.cfg.TapAddrBook.QueryAssetGroupByGroupKey( - ctx, &groupPubKey, + ctx, groupPubKey, ) if err != nil { return nil, fmt.Errorf("failed to find asset group "+ @@ -4150,11 +4129,10 @@ func (r *rpcServer) UpdateSupplyCommit(ctx context.Context, } // Formulate an asset specifier from the asset group key. - assetSpec := asset.NewSpecifierFromGroupKey(groupPubKey) + assetSpec := asset.NewSpecifierFromGroupKey(*groupPubKey) // Send a commit tick event to the supply commitment manager. - event := supplycommit.CommitTickEvent{} - err = r.cfg.SupplyCommitManager.SendEvent(ctx, assetSpec, &event) + err = r.cfg.SupplyCommitManager.StartSupplyPublishFlow(ctx, assetSpec) if err != nil { return nil, fmt.Errorf("failed to send commit tick event: %w", err) @@ -4197,8 +4175,9 @@ func inclusionProofs(ctx context.Context, tree mssmt.Tree, return proofs, nil } -// supplySubtreeRoot fetches the root of a specific supply subtree and its -// supply tree inclusion proof. +// supplySubtreeRoot formulates an inclusion proof for a supply subtree. +// The inclusion proof can be used to verify that the subtree root is indeed +// part of the supply commitment root. func supplySubtreeRoot(ctx context.Context, supplyTree mssmt.Tree, subtrees supplycommit.SupplyTrees, subtreeType supplycommit.SupplySubTree) ( @@ -4244,67 +4223,48 @@ func (r *rpcServer) FetchSupplyCommit(ctx context.Context, req *unirpc.FetchSupplyCommitRequest) ( *unirpc.FetchSupplyCommitResponse, error) { - // Parse asset group key from the request. 
- var groupPubKey btcec.PublicKey - - switch { - case len(req.GetGroupKeyBytes()) > 0: - gk, err := btcec.ParsePubKey(req.GetGroupKeyBytes()) - if err != nil { - return nil, fmt.Errorf("parsing group key: %w", err) - } - - groupPubKey = *gk - - case len(req.GetGroupKeyStr()) > 0: - groupKeyBytes, err := hex.DecodeString(req.GetGroupKeyStr()) - if err != nil { - return nil, fmt.Errorf("decoding group key: %w", err) - } - - gk, err := btcec.ParsePubKey(groupKeyBytes) - if err != nil { - return nil, fmt.Errorf("parsing group key: %w", err) - } - - groupPubKey = *gk - - default: - return nil, fmt.Errorf("group key unspecified") + groupPubKey, err := unmarshalGroupKey( + req.GetGroupKeyBytes(), req.GetGroupKeyStr(), + ) + if err != nil { + return nil, fmt.Errorf("failed to parse group key: %w", err) } // Formulate an asset specifier from the asset group key. - assetSpec := asset.NewSpecifierFromGroupKey(groupPubKey) + assetSpec := asset.NewSpecifierFromGroupKey(*groupPubKey) + + locator, err := unmarshalCommitLocator( + req.GetCommitOutpoint(), req.GetSpentCommitOutpoint(), + req.GetVeryFirst(), + ) + if err != nil { + return nil, fmt.Errorf("failed to parse commitment "+ + "locator: %w", err) + } // Fetch the supply commitment for the asset specifier. 
- respOpt, err := r.cfg.SupplyCommitManager.FetchCommitment( - ctx, assetSpec, + commit, err := r.cfg.SupplyVerifyManager.FetchCommitment( + ctx, assetSpec, locator, ) if err != nil { return nil, fmt.Errorf("failed to fetch supply commit: %w", err) } - if respOpt.IsNone() { - return nil, fmt.Errorf("supply commitment not found for "+ - "asset group with key %x", - groupPubKey.SerializeCompressed()) - } - resp, err := respOpt.UnwrapOrErr(fmt.Errorf("unexpected None value " + - "for supply commitment response")) - if err != nil { - return nil, err - } + rootCommit := commit.Commitment - supplyTreeRoot, err := resp.SupplyTree.Root(ctx) + issuanceLeaves, burnLeaves, ignoreLeaves, err := marshalSupplyLeaves( + commit.Leaves, + ) if err != nil { - return nil, fmt.Errorf("failed to get supply tree root: %w", + return nil, fmt.Errorf("unable to marshal supply leaves: %w", err) } // Fetch subtree commitment root and inclusion proofs for the issuance // subtree. rpcIssuanceSubtreeRoot, err := supplySubtreeRoot( - ctx, resp.SupplyTree, resp.Subtrees, supplycommit.MintTreeType, + ctx, commit.SupplyTree, commit.Subtrees, + supplycommit.MintTreeType, ) if err != nil { return nil, fmt.Errorf("failed to fetch supply issuance "+ @@ -4314,7 +4274,8 @@ func (r *rpcServer) FetchSupplyCommit(ctx context.Context, // Fetch subtree commitment root and inclusion proofs for the burn // subtree. rpcBurnSubtreeRoot, err := supplySubtreeRoot( - ctx, resp.SupplyTree, resp.Subtrees, supplycommit.BurnTreeType, + ctx, commit.SupplyTree, commit.Subtrees, + supplycommit.BurnTreeType, ) if err != nil { return nil, fmt.Errorf("failed to fetch supply burn subtree "+ @@ -4324,7 +4285,7 @@ func (r *rpcServer) FetchSupplyCommit(ctx context.Context, // Fetch subtree commitment root and inclusion proofs for the ignore // subtree. 
rpcIgnoreSubtreeRoot, err := supplySubtreeRoot( - ctx, resp.SupplyTree, resp.Subtrees, + ctx, commit.SupplyTree, commit.Subtrees, supplycommit.IgnoreTreeType, ) if err != nil { @@ -4332,232 +4293,561 @@ func (r *rpcServer) FetchSupplyCommit(ctx context.Context, "root: %w", err) } - // Get inclusion proofs for any issuance leaf key specified in the - // request. - issuanceTree := resp.Subtrees[supplycommit.MintTreeType] - issuanceInclusionProofs, err := inclusionProofs( - ctx, issuanceTree, req.IssuanceLeafKeys, + // Create a chain proof from the commitment block data. + commitBlock, err := rootCommit.CommitmentBlock.UnwrapOrErr( + fmt.Errorf("commitment block not found"), ) if err != nil { - return nil, fmt.Errorf("failed to fetch issuance tree "+ - "inclusion proofs: %w", err) + return nil, err } - // Get inclusion proofs for any burn leaf key specified in the request. - burnTree := resp.Subtrees[supplycommit.BurnTreeType] - burnInclusionProofs, err := inclusionProofs( - ctx, burnTree, req.BurnLeafKeys, - ) - if err != nil { - return nil, fmt.Errorf("failed to fetch burn tree "+ - "inclusion proofs: %w", err) + // Sanity check commitment block data. + if commitBlock.BlockHeader == nil || commitBlock.MerkleProof == nil { + return nil, fmt.Errorf("commitment block data is incomplete") } - // Get inclusion proofs for any ignore leaf key specified in the - // request. - ignoreTree := resp.Subtrees[supplycommit.IgnoreTreeType] - ignoreInclusionProofs, err := inclusionProofs( - ctx, ignoreTree, req.IgnoreLeafKeys, - ) - if err != nil { - return nil, fmt.Errorf("failed to fetch ignore tree "+ - "inclusion proofs: %w", err) + chainProof := supplycommit.ChainProof{ + Header: *commitBlock.BlockHeader, + BlockHeight: commitBlock.Height, + MerkleProof: *commitBlock.MerkleProof, + TxIndex: commitBlock.TxIndex, } - // Sanity check: ensure the supply root derived from the supply tree - // matches the root provided in the chain commitment. 
- if resp.ChainCommitment.SupplyRoot.NodeHash() != - supplyTreeRoot.NodeHash() { - - return nil, fmt.Errorf("mismatched supply commitment root: "+ - "expected %x, got %x", - resp.ChainCommitment.SupplyRoot.NodeHash(), - supplyTreeRoot.NodeHash()) + // Marshal the chain data using the existing function. + chainData, err := marshalSupplyCommitChainData(rootCommit, chainProof) + if err != nil { + return nil, fmt.Errorf("failed to marshal supply commit "+ + "chain data: %w", err) } - txOutInternalKey := resp.ChainCommitment.InternalKey.PubKey - - // Extract the block height and hash from the chain commitment if - // present. - var ( - blockHeight uint32 - blockHash []byte - txIndex uint32 - chainFees int64 - ) - resp.ChainCommitment.CommitmentBlock.WhenSome( - func(b supplycommit.CommitmentBlock) { - blockHeight = b.Height - blockHash = b.Hash[:] - txIndex = b.TxIndex - chainFees = b.ChainFees + // Marshal the spent commitment outpoint if available. + var spentCommitmentOutpoint *taprpc.OutPoint + rootCommit.SpentCommitment.WhenSome( + func(outpoint wire.OutPoint) { + spentCommitmentOutpoint = &taprpc.OutPoint{ + Txid: outpoint.Hash[:], + OutputIndex: outpoint.Index, + } }, ) - return &unirpc.FetchSupplyCommitResponse{ - SupplyCommitmentRoot: marshalMssmtNode(supplyTreeRoot), - - AnchorTxid: resp.ChainCommitment.Txn.TxID(), - AnchorTxOutIdx: resp.ChainCommitment.TxOutIdx, - AnchorTxOutInternalKey: txOutInternalKey.SerializeCompressed(), + // Calculate the total outstanding supply based on the supply + // subtrees. 
+ totalOutstandingSupply, err := supplycommit.CalcTotalOutstandingSupply( + ctx, commit.Subtrees, + ).Unpack() + if err != nil { + return nil, fmt.Errorf("failed to calculate total "+ + "outstanding supply: %w", err) + } - BlockHeight: blockHeight, - BlockHash: blockHash, - BlockTxIndex: txIndex, - TxChainFeesSats: chainFees, + return &unirpc.FetchSupplyCommitResponse{ + ChainData: chainData, + TxChainFeesSats: commitBlock.ChainFees, IssuanceSubtreeRoot: rpcIssuanceSubtreeRoot, BurnSubtreeRoot: rpcBurnSubtreeRoot, IgnoreSubtreeRoot: rpcIgnoreSubtreeRoot, - IssuanceLeafInclusionProofs: issuanceInclusionProofs, - BurnLeafInclusionProofs: burnInclusionProofs, - IgnoreLeafInclusionProofs: ignoreInclusionProofs, + IssuanceLeaves: issuanceLeaves, + BurnLeaves: burnLeaves, + IgnoreLeaves: ignoreLeaves, + + TotalOutstandingSupply: totalOutstandingSupply, + SpentCommitmentOutpoint: spentCommitmentOutpoint, }, nil } +// mapSupplyLeaves is a generic helper that converts a slice of supply update +// events into a slice of RPC SupplyLeafEntry objects. +func mapSupplyLeaves[E any](entries []E) ([]*unirpc.SupplyLeafEntry, error) { + return fn.MapErr(entries, func(i E) (*unirpc.SupplyLeafEntry, error) { + interfaceType, ok := any(&i).(supplycommit.SupplyUpdateEvent) + if !ok { + return nil, fmt.Errorf("expected supply update event, "+ + "got %T", i) + } + return marshalSupplyUpdateEvent(interfaceType) + }) +} + +// marshalSupplyLeaves converts a SupplyLeaves struct into the corresponding +// RPC SupplyLeafEntry slices for issuance, burn, and ignore leaves. 
+func marshalSupplyLeaves(
+	leaves supplycommit.SupplyLeaves) ([]*unirpc.SupplyLeafEntry,
+	[]*unirpc.SupplyLeafEntry, []*unirpc.SupplyLeafEntry, error) {
+
+	rpcIssuanceLeaves, err := mapSupplyLeaves(leaves.IssuanceLeafEntries)
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("unable to marshal issuance "+
+			"leaf: %w", err)
+	}
+
+	rpcBurnLeaves, err := mapSupplyLeaves(leaves.BurnLeafEntries)
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("unable to marshal burn "+
+			"leaf: %w", err)
+	}
+
+	rpcIgnoreLeaves, err := mapSupplyLeaves(leaves.IgnoreLeafEntries)
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("unable to marshal ignore "+
+			"leaf: %w", err)
+	}
+
+	return rpcIssuanceLeaves, rpcBurnLeaves, rpcIgnoreLeaves, nil
+}
+
 // FetchSupplyLeaves returns the set of supply leaves for the given asset
 // specifier within the specified height range.
 func (r *rpcServer) FetchSupplyLeaves(ctx context.Context,
 	req *unirpc.FetchSupplyLeavesRequest) (
 	*unirpc.FetchSupplyLeavesResponse, error) {
 
-	// Parse asset group key from the request.
-	var groupPubKey btcec.PublicKey
+	groupPubKey, err := unmarshalGroupKey(
+		req.GetGroupKeyBytes(), req.GetGroupKeyStr(),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse group key: %w", err)
+	}
+
+	// Formulate an asset specifier from the asset group key.
+	assetSpec := asset.NewSpecifierFromGroupKey(*groupPubKey)
 
-	switch {
-	case len(req.GetGroupKeyBytes()) > 0:
-		gk, err := btcec.ParsePubKey(req.GetGroupKeyBytes())
+	// Fetch supply leaves for the asset specifier.
+	resp, err := r.cfg.SupplyCommitManager.FetchSupplyLeavesByHeight(
+		ctx, assetSpec, req.BlockHeightStart, req.BlockHeightEnd,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch supply leaves: %w", err)
+	}
+
+	// Check if inclusion proofs are requested.
+	needsInclusionProofs := len(req.IssuanceLeafKeys) > 0 ||
+		len(req.BurnLeafKeys) > 0 || len(req.IgnoreLeafKeys) > 0
+
+	// If inclusion proofs are requested, fetch the subtrees. 
+ var subtrees supplycommit.SupplyTrees + if needsInclusionProofs { + subtreeResult, err := r.cfg.SupplyCommitManager.FetchSubTrees( + ctx, assetSpec, fn.None[uint32](), + ) if err != nil { - return nil, fmt.Errorf("parsing group key: %w", err) + return nil, fmt.Errorf("failed to fetch subtrees for "+ + "inclusion proofs: %w", err) } - groupPubKey = *gk + subtrees = subtreeResult + } + + issuanceLeaves, burnLeaves, ignoreLeaves, err := marshalSupplyLeaves( + resp, + ) + if err != nil { + return nil, fmt.Errorf("unable to marshal supply leaves: %w", + err) + } - case len(req.GetGroupKeyStr()) > 0: - groupKeyBytes, err := hex.DecodeString(req.GetGroupKeyStr()) + // Generate inclusion proofs if requested. + var ( + issuanceInclusionProofs [][]byte + burnInclusionProofs [][]byte + ignoreInclusionProofs [][]byte + ) + + if needsInclusionProofs { + // Get inclusion proofs for any issuance leaf key specified in + // the request. + issuanceTree := subtrees[supplycommit.MintTreeType] + var err error + issuanceInclusionProofs, err = inclusionProofs( + ctx, issuanceTree, req.IssuanceLeafKeys, + ) if err != nil { - return nil, fmt.Errorf("decoding group key: %w", err) + return nil, fmt.Errorf("failed to fetch issuance tree "+ + "inclusion proofs: %w", err) } - gk, err := btcec.ParsePubKey(groupKeyBytes) + // Get inclusion proofs for any burn leaf key specified in the + // request. + burnTree := subtrees[supplycommit.BurnTreeType] + burnInclusionProofs, err = inclusionProofs( + ctx, burnTree, req.BurnLeafKeys, + ) if err != nil { - return nil, fmt.Errorf("parsing group key: %w", err) + return nil, fmt.Errorf("failed to fetch burn tree "+ + "inclusion proofs: %w", err) } - groupPubKey = *gk + // Get inclusion proofs for any ignore leaf key specified in the + // request. 
+ ignoreTree := subtrees[supplycommit.IgnoreTreeType] + ignoreInclusionProofs, err = inclusionProofs( + ctx, ignoreTree, req.IgnoreLeafKeys, + ) + if err != nil { + return nil, fmt.Errorf("failed to fetch ignore tree "+ + "inclusion proofs: %w", err) + } + } - default: - return nil, fmt.Errorf("group key unspecified") + return &unirpc.FetchSupplyLeavesResponse{ + IssuanceLeaves: issuanceLeaves, + BurnLeaves: burnLeaves, + IgnoreLeaves: ignoreLeaves, + IssuanceLeafInclusionProofs: issuanceInclusionProofs, + BurnLeafInclusionProofs: burnInclusionProofs, + IgnoreLeafInclusionProofs: ignoreInclusionProofs, + }, nil +} + +// unmarshalMintSupplyLeaf converts an RPC SupplyLeafEntry into a NewMintEvent. +func unmarshalMintSupplyLeaf( + rpcLeaf *unirpc.SupplyLeafEntry) (*supplycommit.NewMintEvent, error) { + + if rpcLeaf == nil { + return nil, fmt.Errorf("supply leaf entry is nil") } - // Formulate an asset specifier from the asset group key. - assetSpec := asset.NewSpecifierFromGroupKey(groupPubKey) + if rpcLeaf.LeafKey == nil { + return nil, fmt.Errorf("supply leaf key is nil") + } - // Fetch supply leaves for the asset specifier. - resp, err := r.cfg.SupplyCommitManager.FetchSupplyLeavesByHeight( - ctx, assetSpec, req.BlockHeightStart, req.BlockHeightEnd, - ) + if rpcLeaf.LeafNode == nil { + return nil, fmt.Errorf("supply leaf node is nil") + } + + if len(rpcLeaf.RawLeaf) == 0 { + return nil, fmt.Errorf("missing RawLeaf data for mint event") + } + + var mintEvent supplycommit.NewMintEvent + err := mintEvent.Decode(bytes.NewReader(rpcLeaf.RawLeaf)) if err != nil { - return nil, fmt.Errorf("failed to fetch supply leaves: %w", err) + return nil, fmt.Errorf("unable to decode mint event: %w", err) } - rpcMarshalLeafEntry := func(leafEntry supplycommit.SupplyUpdateEvent) ( - *unirpc.SupplyLeafEntry, error) { + // Validate that the decoded event matches the provided metadata. 
+ if mintEvent.BlockHeight() != rpcLeaf.BlockHeight { + return nil, fmt.Errorf("block height mismatch: "+ + "decoded=%d, provided=%d", mintEvent.BlockHeight(), + rpcLeaf.BlockHeight) + } - leafNode, err := leafEntry.UniverseLeafNode() - if err != nil { - rpcsLog.Errorf("Failed to get universe leaf node "+ - "from leaf entry: %v (leaf_entry=%s)", err, - spew.Sdump(leafEntry)) + return &mintEvent, nil +} - return nil, fmt.Errorf("failed to get universe leaf "+ - "node from leaf entry: %w", err) - } +// unmarshalBurnSupplyLeaf converts an RPC SupplyLeafEntry into a NewBurnEvent. +func unmarshalBurnSupplyLeaf( + rpcLeaf *unirpc.SupplyLeafEntry) (*supplycommit.NewBurnEvent, error) { - leafKey := leafEntry.UniverseLeafKey() + if rpcLeaf == nil { + return nil, fmt.Errorf("supply leaf entry is nil") + } - outPoint := leafKey.LeafOutPoint() - rpcOutPoint := unirpc.Outpoint{ - HashStr: outPoint.Hash.String(), - Index: int32(outPoint.Index), - } + if rpcLeaf.LeafKey == nil { + return nil, fmt.Errorf("supply leaf key is nil") + } - // Encode the leaf as a byte slice. - var leafBuf bytes.Buffer - err = leafEntry.Encode(&leafBuf) - if err != nil { - return nil, fmt.Errorf("failed to encode leaf entry: "+ - "%w", err) + if rpcLeaf.LeafNode == nil { + return nil, fmt.Errorf("supply leaf node is nil") + } + + if len(rpcLeaf.RawLeaf) == 0 { + return nil, fmt.Errorf("missing RawLeaf data for burn event") + } + + // Create and decode the burn leaf from raw leaf bytes. + var burnLeaf universe.BurnLeaf + err := burnLeaf.Decode(bytes.NewReader(rpcLeaf.RawLeaf)) + if err != nil { + return nil, fmt.Errorf("unable to decode burn leaf: %w", err) + } + + burnEvent := &supplycommit.NewBurnEvent{ + BurnLeaf: burnLeaf, + } + + // Validate that the decoded event matches the provided metadata. 
+ if burnEvent.BlockHeight() != rpcLeaf.BlockHeight { + return nil, fmt.Errorf("block height mismatch: "+ + "decoded=%d, provided=%d", burnEvent.BlockHeight(), + rpcLeaf.BlockHeight) + } + + return burnEvent, nil +} + +// unmarshalIgnoreSupplyLeaf converts an RPC SupplyLeafEntry into a +// NewIgnoreEvent. +func unmarshalIgnoreSupplyLeaf( + rpcLeaf *unirpc.SupplyLeafEntry) (*supplycommit.NewIgnoreEvent, error) { + + if rpcLeaf == nil { + return nil, fmt.Errorf("supply leaf entry is nil") + } + + if rpcLeaf.LeafKey == nil { + return nil, fmt.Errorf("supply leaf key is nil") + } + + if rpcLeaf.LeafNode == nil { + return nil, fmt.Errorf("supply leaf node is nil") + } + + if len(rpcLeaf.RawLeaf) == 0 { + return nil, fmt.Errorf("missing RawLeaf data for ignore event") + } + + var signedIgnoreTuple universe.SignedIgnoreTuple + err := signedIgnoreTuple.Decode(bytes.NewReader(rpcLeaf.RawLeaf)) + if err != nil { + return nil, fmt.Errorf("unable to decode signed ignore "+ + "tuple: %w", err) + } + + ignoreEvent := &supplycommit.NewIgnoreEvent{ + SignedIgnoreTuple: signedIgnoreTuple, + } + + // Validate that the decoded event matches the provided metadata. + if ignoreEvent.BlockHeight() != rpcLeaf.BlockHeight { + return nil, fmt.Errorf("block height mismatch: "+ + "decoded=%d, provided=%d", ignoreEvent.BlockHeight(), + rpcLeaf.BlockHeight) + } + + return ignoreEvent, nil +} + +// marshalSupplyUpdateEvent converts a SupplyUpdateEvent into an RPC +// SupplyLeafEntry. +func marshalSupplyUpdateEvent( + leafEntry supplycommit.SupplyUpdateEvent) (*unirpc.SupplyLeafEntry, + error) { + + leafNode, err := leafEntry.UniverseLeafNode() + if err != nil { + return nil, fmt.Errorf("unable to get universe leaf node "+ + "from leaf entry: %w", err) + } + + leafKey := leafEntry.UniverseLeafKey() + + outPoint := leafKey.LeafOutPoint() + rpcOutPoint := unirpc.Outpoint{ + HashStr: outPoint.Hash.String(), + Index: int32(outPoint.Index), + } + + // Encode the leaf as a byte slice. 
+	var leafBuf bytes.Buffer
+	err = leafEntry.Encode(&leafBuf)
+	if err != nil {
+		return nil, fmt.Errorf("unable to encode leaf entry: %w", err)
+	}
+
+	return &unirpc.SupplyLeafEntry{
+		LeafKey: &unirpc.SupplyLeafKey{
+			Outpoint: &rpcOutPoint,
+			ScriptKey: schnorr.SerializePubKey(
+				leafKey.LeafScriptKey().PubKey,
+			),
+			AssetId: fn.ByteSlice(leafKey.LeafAssetID()),
+		},
+		LeafNode:    marshalMssmtNode(leafNode),
+		BlockHeight: leafEntry.BlockHeight(),
+		RawLeaf:     leafBuf.Bytes(),
+	}, nil
+}
+
+// unmarshalSupplyCommitChainData converts an RPC SupplyCommitChainData into
+// a supplycommit.RootCommitment with chain proof data in CommitmentBlock.
+func unmarshalSupplyCommitChainData(
+	rpcData *unirpc.SupplyCommitChainData) (*supplycommit.RootCommitment,
+	error) {
+
+	if rpcData == nil {
+		return nil, fmt.Errorf("supply commit chain data is nil")
+	}
+
+	var txn wire.MsgTx
+	err := txn.Deserialize(bytes.NewReader(rpcData.Txn))
+	if err != nil {
+		return nil, fmt.Errorf("unable to deserialize transaction: %w",
+			err)
+	}
+
+	internalKey, err := btcec.ParsePubKey(rpcData.InternalKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse internal key: %w", err)
+	}
+
+	outputKey, err := btcec.ParsePubKey(rpcData.OutputKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse output key: %w", err)
+	}
+
+	// Convert supply root hash.
+	if len(rpcData.SupplyRootHash) != 32 {
+		return nil, fmt.Errorf("invalid supply root hash size: "+
+			"expected %d, got %d", 32, len(rpcData.SupplyRootHash))
+	}
+	var supplyRootHash mssmt.NodeHash
+	copy(supplyRootHash[:], rpcData.SupplyRootHash)
+
+	// Create commitment block from the hash. 
+ var commitmentBlock fn.Option[supplycommit.CommitmentBlock] + if len(rpcData.BlockHash) > 0 { + if len(rpcData.BlockHash) != chainhash.HashSize { + return nil, fmt.Errorf("invalid block hash size: "+ + "expected %d, got %d", chainhash.HashSize, + len(rpcData.BlockHash)) } + var blockHash chainhash.Hash + copy(blockHash[:], rpcData.BlockHash) - return &unirpc.SupplyLeafEntry{ - LeafKey: &unirpc.SupplyLeafKey{ - Outpoint: &rpcOutPoint, - ScriptKey: schnorr.SerializePubKey( - leafKey.LeafScriptKey().PubKey, - ), - AssetId: fn.ByteSlice(leafKey.LeafAssetID()), - }, - LeafNode: marshalMssmtNode(leafNode), - BlockHeight: leafEntry.BlockHeight(), - RawLeaf: leafBuf.Bytes(), - }, nil + commitmentBlock = fn.Some(supplycommit.CommitmentBlock{ + Height: rpcData.BlockHeight, + Hash: blockHash, + TxIndex: rpcData.TxIndex, + }) } - // Marshal issuance supply leaves into the RPC format. - rpcIssuanceLeaves := make( - []*unirpc.SupplyLeafEntry, 0, len(resp.IssuanceLeafEntries), + rootCommitment := &supplycommit.RootCommitment{ + Txn: &txn, + TxOutIdx: rpcData.TxOutIdx, + InternalKey: keychain.KeyDescriptor{ + PubKey: internalKey, + }, + OutputKey: outputKey, + SupplyRoot: mssmt.NewComputedBranch( + supplyRootHash, rpcData.SupplyRootSum, + ), + CommitmentBlock: commitmentBlock, + } + + var blockHeader wire.BlockHeader + err = blockHeader.Deserialize(bytes.NewReader(rpcData.BlockHeader)) + if err != nil { + return nil, fmt.Errorf("unable to deserialize block header: "+ + "%w", err) + } + + var merkleProof proof.TxMerkleProof + err = merkleProof.Decode(bytes.NewReader(rpcData.TxBlockMerkleProof)) + if err != nil { + return nil, fmt.Errorf("unable to decode merkle proof: %w", err) + } + + rootCommitment.CommitmentBlock = fn.Some(supplycommit.CommitmentBlock{ + Height: rpcData.BlockHeight, + Hash: blockHeader.BlockHash(), + TxIndex: rpcData.TxIndex, + BlockHeader: &blockHeader, + MerkleProof: &merkleProof, + }) + + return rootCommitment, nil +} + +// unmarshalSupplyLeaves converts 
the RPC supply leaves into a SupplyLeaves +// struct that can be used by the supply commitment verifier. +func unmarshalSupplyLeaves(issuanceLeaves, burnLeaves, + ignoreLeaves []*unirpc.SupplyLeafEntry) (*supplycommit.SupplyLeaves, + error) { + + var ( + supplyLeaves supplycommit.SupplyLeaves + err error ) - for idx := range resp.IssuanceLeafEntries { - leafEntry := resp.IssuanceLeafEntries[idx] + supplyLeaves.IssuanceLeafEntries, err = fn.MapErrWithPtr( + issuanceLeaves, unmarshalMintSupplyLeaf, + ) + if err != nil { + return nil, fmt.Errorf("unable to unmarshal mint event: %w", + err) + } - rpcLeaf, err := rpcMarshalLeafEntry(&leafEntry) - if err != nil { - return nil, fmt.Errorf("failed to marshal supply "+ - "leaf entry: %w", err) - } + supplyLeaves.BurnLeafEntries, err = fn.MapErrWithPtr( + burnLeaves, unmarshalBurnSupplyLeaf, + ) + if err != nil { + return nil, fmt.Errorf("unable to unmarshal burn event: %w", + err) + } - rpcIssuanceLeaves = append(rpcIssuanceLeaves, rpcLeaf) + supplyLeaves.IgnoreLeafEntries, err = fn.MapErrWithPtr( + ignoreLeaves, unmarshalIgnoreSupplyLeaf, + ) + if err != nil { + return nil, fmt.Errorf("unable to unmarshal ignore event: %w", + err) } - // Marshal burn supply leaves into the RPC format. - rpcBurnLeaves := make( - []*unirpc.SupplyLeafEntry, 0, len(resp.BurnLeafEntries), + return &supplyLeaves, nil +} + +// InsertSupplyCommit stores a verified supply commitment for the given +// asset group in the node's local database. +func (r *rpcServer) InsertSupplyCommit(ctx context.Context, + req *unirpc.InsertSupplyCommitRequest) ( + *unirpc.InsertSupplyCommitResponse, error) { + + groupPubKey, err := unmarshalGroupKey( + req.GetGroupKeyBytes(), req.GetGroupKeyStr(), ) - for idx := range resp.BurnLeafEntries { - leafEntry := resp.BurnLeafEntries[idx] + if err != nil { + return nil, fmt.Errorf("failed to parse group key: %w", err) + } + + // Log the operation for debugging purposes. 
+ rpcsLog.Debugf("InsertSupplyCommitment called for group key: %x", + groupPubKey.SerializeCompressed()) - rpcLeaf, err := rpcMarshalLeafEntry(&leafEntry) + // Unmarshal the supply commit chain data. + rootCommitment, err := unmarshalSupplyCommitChainData(req.ChainData) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal chain data: %w", + err) + } + + if req.SpentCommitmentOutpoint != nil { + op, err := rpcutils.UnmarshalOutPoint( + req.SpentCommitmentOutpoint, + ) if err != nil { - return nil, fmt.Errorf("failed to marshal supply "+ - "leaf entry: %w", err) + return nil, fmt.Errorf("failed to parse spent "+ + "commitment outpoint: %w", err) } - rpcBurnLeaves = append(rpcBurnLeaves, rpcLeaf) + rootCommitment.SpentCommitment = fn.Some(op) } - // Marshal ignore supply leaves into the RPC format. - rpcIgnoreLeaves := make( - []*unirpc.SupplyLeafEntry, 0, len(resp.IgnoreLeafEntries), + supplyLeaves, err := unmarshalSupplyLeaves( + req.IssuanceLeaves, req.BurnLeaves, req.IgnoreLeaves, ) - for idx := range resp.IgnoreLeafEntries { - leafEntry := resp.IgnoreLeafEntries[idx] + if err != nil { + return nil, fmt.Errorf("unable to unmarshal supply leaves: %w", + err) + } - rpcLeaf, err := rpcMarshalLeafEntry(&leafEntry) - if err != nil { - return nil, fmt.Errorf("failed to marshal supply "+ - "leaf entry: %w", err) - } + rpcsLog.Debugf("Successfully unmarshalled commitment, %d issuance, "+ + "%d burn, and %d ignore leaves, and chain proof", + len(supplyLeaves.IssuanceLeafEntries), + len(supplyLeaves.BurnLeafEntries), + len(supplyLeaves.IgnoreLeafEntries)) - rpcIgnoreLeaves = append(rpcIgnoreLeaves, rpcLeaf) + assetSpec := asset.NewSpecifierFromGroupKey(*groupPubKey) + err = r.cfg.SupplyVerifyManager.InsertSupplyCommit( + ctx, assetSpec, *rootCommitment, *supplyLeaves, + ) + if err != nil { + return nil, fmt.Errorf("failed to insert supply commitment: %w", + err) } - return &unirpc.FetchSupplyLeavesResponse{ - IssuanceLeaves: rpcIssuanceLeaves, - BurnLeaves: 
rpcBurnLeaves, - IgnoreLeaves: rpcIgnoreLeaves, - }, nil + return &unirpc.InsertSupplyCommitResponse{}, nil } // SubscribeSendAssetEventNtfns registers a subscription to the event @@ -6669,6 +6959,87 @@ func unmarshalUniverseKey(key *unirpc.UniverseKey) (universe.Identifier, return uniID, leafKey, nil } +// unmarshalGroupKey attempts to parse a group key from either the byte slice +// or hex string form. If the group key is specified in both forms, the byte +// slice form takes precedence (which usually can't be the case because we use +// this for `oneof` gRPC fields that can't specify both). +func unmarshalGroupKey(groupKeyBytes []byte, + groupKeyStr string) (*btcec.PublicKey, error) { + + switch { + case len(groupKeyBytes) > 0: + gk, err := btcec.ParsePubKey(groupKeyBytes) + if err != nil { + return nil, fmt.Errorf("error parsing group key: %w", + err) + } + + return gk, nil + + case len(groupKeyStr) > 0: + groupKeyBytes, err := hex.DecodeString(groupKeyStr) + if err != nil { + return nil, fmt.Errorf("error decoding group key: %w", + err) + } + + gk, err := btcec.ParsePubKey(groupKeyBytes) + if err != nil { + return nil, fmt.Errorf("error parsing group key: %w", + err) + } + + return gk, nil + + default: + return nil, fmt.Errorf("group key unspecified") + } +} + +// unmarshalCommitLocator attempts to parse a commitment locator from the +// RPC form. +func unmarshalCommitLocator(outpoint, spentOutpoint *taprpc.OutPoint, + veryFirst bool) (supplyverifier.CommitLocator, error) { + + var zero supplyverifier.CommitLocator + + // These values come from a gRPC `oneof` field, so only one can be set + // at a time. 
+ switch { + case outpoint != nil: + op, err := rpcutils.UnmarshalOutPoint(outpoint) + if err != nil { + return zero, fmt.Errorf("error parsing outpoint: %w", + err) + } + + return supplyverifier.CommitLocator{ + LocatorType: supplyverifier.LocatorTypeOutpoint, + Outpoint: op, + }, nil + + case spentOutpoint != nil: + op, err := rpcutils.UnmarshalOutPoint(spentOutpoint) + if err != nil { + return zero, fmt.Errorf("error parsing spent "+ + "outpoint: %w", err) + } + + return supplyverifier.CommitLocator{ + LocatorType: supplyverifier.LocatorTypeSpentOutpoint, + Outpoint: op, + }, nil + + case veryFirst: + return supplyverifier.CommitLocator{ + LocatorType: supplyverifier.LocatorTypeVeryFirst, + }, nil + + default: + return zero, fmt.Errorf("commitment locator must be set") + } +} + // unmarshalAssetLeaf unmarshals an asset leaf from the RPC form. func unmarshalAssetLeaf(leaf *unirpc.AssetLeaf) (*universe.Leaf, error) { // We'll just pull the asset details from the serialized issuance proof diff --git a/server.go b/server.go index c4816286e..303df0c5e 100644 --- a/server.go +++ b/server.go @@ -217,6 +217,12 @@ func (s *Server) initialize(interceptorChain *rpcperms.InterceptorChain) error { err) } + // Start universe supply verify manager. + if err := s.cfg.SupplyVerifyManager.Start(); err != nil { + return fmt.Errorf("unable to start supply verify manager: %w", + err) + } + // Start the auxiliary components. if err := s.cfg.AuxLeafSigner.Start(); err != nil { return fmt.Errorf("unable to start aux leaf signer: %w", err) @@ -747,6 +753,12 @@ func (s *Server) Stop() error { err) } + // Stop universe supply verify manager. 
+ if err := s.cfg.SupplyVerifyManager.Stop(); err != nil { + return fmt.Errorf("unable to stop supply verify manager: %w", + err) + } + + if err := s.cfg.AuxLeafSigner.Stop(); err != nil { + return err + } diff --git a/supplysync_rpc.go b/supplysync_rpc.go new file mode 100644 index 000000000..36babfbae --- /dev/null +++ b/supplysync_rpc.go @@ -0,0 +1,176 @@ +package taprootassets + +import ( + "bytes" + "context" + "fmt" + + "github.com/btcsuite/btcd/wire" + "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/fn" + "github.com/lightninglabs/taproot-assets/taprpc" + unirpc "github.com/lightninglabs/taproot-assets/taprpc/universerpc" + "github.com/lightninglabs/taproot-assets/universe" + "github.com/lightninglabs/taproot-assets/universe/supplycommit" + "github.com/lightninglabs/taproot-assets/universe/supplyverifier" +) + +// RpcSupplySync is an implementation of the supplyverifier.UniverseClient interface +// that uses an RPC connection to target a remote universe server. +type RpcSupplySync struct { + // serverAddr is the address of the remote universe server. + serverAddr universe.ServerAddr + + // conn is the RPC connection to the remote universe server. + conn *universeClientConn +} + +// NewRpcSupplySync creates a new RpcSupplySync instance that dials out to +// the target remote universe server address. +func NewRpcSupplySync( + serverAddr universe.ServerAddr) (supplyverifier.UniverseClient, error) { + + conn, err := ConnectUniverse(serverAddr) + if err != nil { + return nil, fmt.Errorf("unable to connect to universe RPC "+ + "server: %w", err) + } + + return &RpcSupplySync{ + serverAddr: serverAddr, + conn: conn, + }, nil +} + +// Ensure NewRpcSupplySync is of type UniverseClientFactory. +var _ supplyverifier.UniverseClientFactory = NewRpcSupplySync + +// InsertSupplyCommit inserts a supply commitment for a specific asset +// group into the remote universe server. 
+func (r *RpcSupplySync) InsertSupplyCommit(ctx context.Context, + assetSpec asset.Specifier, commitment supplycommit.RootCommitment, + leaves supplycommit.SupplyLeaves, + chainProof supplycommit.ChainProof) error { + + srvrLog.Infof("[RpcSupplySync.InsertSupplyCommit]: inserting supply "+ + "commitment into remote server "+ + "(server_addr=%s, asset=%s, supply_tree_root_hash=%s)", + r.serverAddr.HostStr(), assetSpec.String(), + commitment.SupplyRoot.NodeHash().String()) + + groupKey, err := assetSpec.UnwrapGroupKeyOrErr() + if err != nil { + return fmt.Errorf("unable to unwrap group key: %w", err) + } + + // Marshal the supply commit chain data to RPC format. + rpcChainData, err := marshalSupplyCommitChainData( + commitment, chainProof, + ) + if err != nil { + return fmt.Errorf("unable to marshal chain data: %w", err) + } + + issuanceLeaves, burnLeaves, ignoreLeaves, err := marshalSupplyLeaves( + leaves, + ) + if err != nil { + return fmt.Errorf("unable to marshal supply leaves: %w", err) + } + + // Marshal spent commitment outpoint. 
+ var spentCommitmentOutpoint *taprpc.OutPoint + commitment.SpentCommitment.WhenSome(func(point wire.OutPoint) { + spentCommitmentOutpoint = &taprpc.OutPoint{ + Txid: point.Hash[:], + OutputIndex: point.Index, + } + }) + + req := &unirpc.InsertSupplyCommitRequest{ + GroupKey: &unirpc.InsertSupplyCommitRequest_GroupKeyBytes{ + GroupKeyBytes: groupKey.SerializeCompressed(), + }, + ChainData: rpcChainData, + SpentCommitmentOutpoint: spentCommitmentOutpoint, + IssuanceLeaves: issuanceLeaves, + BurnLeaves: burnLeaves, + IgnoreLeaves: ignoreLeaves, + } + + _, err = r.conn.InsertSupplyCommit(ctx, req) + if err != nil { + return fmt.Errorf("unable to insert supply commitment: %w", err) + } + + srvrLog.Infof("[RpcSupplySync.InsertSupplyCommit]: succeeded in "+ + "inserting supply commitment "+ + "(server_addr=%s, asset=%s, supply_tree_root_hash=%s)", + r.serverAddr.HostStr(), assetSpec.String(), + commitment.SupplyRoot.NodeHash().String()) + + return nil +} + +// Close closes the RPC connection to the universe server. +func (r *RpcSupplySync) Close() error { + if r.conn != nil && r.conn.ClientConn != nil { + return r.conn.ClientConn.Close() + } + return nil +} + +// marshalSupplyCommitChainData converts a supplycommit.RootCommitment and +// supplycommit.ChainProof into a combined RPC SupplyCommitChainData. +func marshalSupplyCommitChainData( + rootCommitment supplycommit.RootCommitment, + chainProof supplycommit.ChainProof) (*unirpc.SupplyCommitChainData, + error) { + + // Serialize the transaction. + var txnBuf bytes.Buffer + err := rootCommitment.Txn.Serialize(&txnBuf) + if err != nil { + return nil, fmt.Errorf("unable to serialize transaction: %w", + err) + } + + // Serialize the block header. + var headerBuf bytes.Buffer + err = chainProof.Header.Serialize(&headerBuf) + if err != nil { + return nil, fmt.Errorf("unable to serialize block header: %w", + err) + } + + // Serialize the merkle proof. 
+ var merkleProofBuf bytes.Buffer + err = chainProof.MerkleProof.Encode(&merkleProofBuf) + if err != nil { + return nil, fmt.Errorf("unable to encode merkle proof: %w", + err) + } + + // nolint: lll + rpcChainData := &unirpc.SupplyCommitChainData{ + Txn: txnBuf.Bytes(), + TxOutIdx: rootCommitment.TxOutIdx, + InternalKey: rootCommitment.InternalKey.PubKey.SerializeCompressed(), + OutputKey: rootCommitment.OutputKey.SerializeCompressed(), + SupplyRootHash: fn.ByteSlice(rootCommitment.SupplyRoot.NodeHash()), + SupplyRootSum: rootCommitment.SupplyRoot.NodeSum(), + BlockHeader: headerBuf.Bytes(), + BlockHeight: chainProof.BlockHeight, + TxBlockMerkleProof: merkleProofBuf.Bytes(), + TxIndex: chainProof.TxIndex, + } + + // Handle optional commitment block hash. + rootCommitment.CommitmentBlock.WhenSome( + func(block supplycommit.CommitmentBlock) { + rpcChainData.BlockHash = block.Hash[:] + }, + ) + + return rpcChainData, nil +} diff --git a/tapcfg/server.go b/tapcfg/server.go index 1a10f705f..a94b43711 100644 --- a/tapcfg/server.go +++ b/tapcfg/server.go @@ -25,6 +25,7 @@ import ( "github.com/lightninglabs/taproot-assets/tapscript" "github.com/lightninglabs/taproot-assets/universe" "github.com/lightninglabs/taproot-assets/universe/supplycommit" + "github.com/lightninglabs/taproot-assets/universe/supplyverifier" "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/clock" lfn "github.com/lightningnetwork/lnd/fn/v2" @@ -518,15 +519,27 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, ) supplyCommitStore := tapdb.NewSupplyCommitMachine(supplyCommitDb) + // Setup supply syncer. 
+ supplySyncerStore := tapdb.NewSupplySyncerStore(uniDB) + supplySyncer := supplyverifier.NewSupplySyncer( + supplyverifier.SupplySyncerConfig{ + ClientFactory: tap.NewRpcSupplySync, + Store: supplySyncerStore, + UniverseFederationView: federationDB, + }, + ) + // Create the supply commitment state machine manager, which is used to // manage the supply commitment state machines for each asset group. - supplyCommitManager := supplycommit.NewMultiStateMachineManager( - supplycommit.MultiStateMachineManagerCfg{ + supplyCommitManager := supplycommit.NewManager( + supplycommit.ManagerCfg{ TreeView: supplyTreeStore, Commitments: supplyCommitStore, Wallet: walletAnchor, + AssetLookup: tapdbAddrBook, KeyRing: keyRing, Chain: chainBridge, + SupplySyncer: &supplySyncer, DaemonAdapters: lndFsmDaemonAdapters, StateLog: supplyCommitStore, ChainParams: *tapChainParams.Params, @@ -534,6 +547,21 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, }, ) + // Set up the supply verifier, which validates supply commitment leaves + // published by asset issuers. + supplyVerifyManager := supplyverifier.NewManager( + supplyverifier.ManagerCfg{ + Chain: chainBridge, + AssetLookup: tapdbAddrBook, + Lnd: lndServices, + SupplyCommitView: supplyCommitStore, + SupplyTreeView: supplyTreeStore, + GroupFetcher: assetMintingStore, + IssuanceSubscriptions: universeSyncer, + DaemonAdapters: lndFsmDaemonAdapters, + }, + ) + // For the porter, we'll make a multi-notifier comprised of all the // possible proof file sources to ensure it can always fetch input // proofs. 
@@ -689,6 +717,7 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, FsmDaemonAdapters: lndFsmDaemonAdapters, SupplyCommitManager: supplyCommitManager, IgnoreChecker: ignoreChecker, + SupplyVerifyManager: supplyVerifyManager, UniverseArchive: uniArchive, UniverseSyncer: universeSyncer, UniverseFederation: universeFederation, diff --git a/tapdb/migrations.go b/tapdb/migrations.go index 74fcc4187..4666eb5c5 100644 --- a/tapdb/migrations.go +++ b/tapdb/migrations.go @@ -24,7 +24,7 @@ const ( // daemon. // // NOTE: This MUST be updated when a new migration is added. - LatestMigrationVersion = 44 + LatestMigrationVersion = 45 ) // DatabaseBackend is an interface that contains all methods our different diff --git a/tapdb/sqlc/migrations/000045_supply_syncer_push_log.down.sql b/tapdb/sqlc/migrations/000045_supply_syncer_push_log.down.sql new file mode 100644 index 000000000..d5c9c4867 --- /dev/null +++ b/tapdb/sqlc/migrations/000045_supply_syncer_push_log.down.sql @@ -0,0 +1,11 @@ +-- Drop the supply_syncer_push_log table and its indexes. +DROP INDEX IF EXISTS supply_syncer_push_log_server_address_idx; +DROP INDEX IF EXISTS supply_syncer_push_log_group_key_idx; +DROP TABLE IF EXISTS supply_syncer_push_log; + +-- Drop supply_commitments changes. +DROP INDEX IF EXISTS supply_commitments_outpoint_uk; +DROP INDEX IF EXISTS supply_commitments_spent_commitment_idx; + +ALTER TABLE supply_commitments + DROP COLUMN spent_commitment; diff --git a/tapdb/sqlc/migrations/000045_supply_syncer_push_log.up.sql b/tapdb/sqlc/migrations/000045_supply_syncer_push_log.up.sql new file mode 100644 index 000000000..127935a45 --- /dev/null +++ b/tapdb/sqlc/migrations/000045_supply_syncer_push_log.up.sql @@ -0,0 +1,55 @@ +-- Table to track supply commitment pushes to remote universe servers. +CREATE TABLE supply_syncer_push_log ( + id INTEGER PRIMARY KEY, + + -- The tweaked group key identifying the asset group this push log belongs + -- to. 
This should match the group_key format used in universe_supply_roots. + group_key BLOB NOT NULL CHECK(length(group_key) = 33), + + -- The highest block height among all supply leaves in this push. + max_pushed_block_height INTEGER NOT NULL, + + -- The server address (host:port) where the commitment was pushed. + server_address TEXT NOT NULL, + + -- The transaction ID (hash) of the supply commitment. + commit_txid BLOB NOT NULL CHECK(length(commit_txid) = 32), + + -- The supply commitment output index within the commitment transaction. + output_index INTEGER NOT NULL, + + -- The number of leaves included in this specific push (diff count between + -- last commitment and current commitment). + num_leaves_pushed INTEGER NOT NULL, + + -- The timestamp when this push log entry was created (unix timestamp in seconds). + created_at BIGINT NOT NULL +); + +-- Add index for frequent lookups by group key. +CREATE INDEX supply_syncer_push_log_group_key_idx + ON supply_syncer_push_log(group_key); + +-- Add index for lookups by server address. +CREATE INDEX supply_syncer_push_log_server_address_idx + ON supply_syncer_push_log(server_address); + +-- A nullable column to track the previous supply commitment that was spent to +-- create a new supply commitment. This is only NULL for the very first +-- commitment of an asset group, each subsequent commitment needs to spend a +-- prior commitment to ensure continuity in the supply chain. +ALTER TABLE supply_commitments + ADD COLUMN spent_commitment BIGINT + REFERENCES supply_commitments(commit_id); + +-- Add an index to speed up lookups by spent commitment. +CREATE INDEX supply_commitments_spent_commitment_idx + ON supply_commitments(spent_commitment); + +-- The outpoint of a supply commitment must be unique. Because we don't have a +-- separate field for the outpoint, we create a unique index over the chain +-- transaction ID and output index. 
This ensures that each commitment can be +-- uniquely identified by its transaction and output index, preventing +-- duplicate commitments for the same output. +CREATE UNIQUE INDEX supply_commitments_outpoint_uk + ON supply_commitments(chain_txn_id, output_index); diff --git a/tapdb/sqlc/models.go b/tapdb/sqlc/models.go index 97d4f3c89..fb1e3aeb2 100644 --- a/tapdb/sqlc/models.go +++ b/tapdb/sqlc/models.go @@ -401,17 +401,29 @@ type SupplyCommitUpdateType struct { } type SupplyCommitment struct { - CommitID int64 - GroupKey []byte - ChainTxnID int64 - OutputIndex sql.NullInt32 - InternalKeyID int64 - OutputKey []byte - BlockHeader []byte - BlockHeight sql.NullInt32 - MerkleProof []byte - SupplyRootHash []byte - SupplyRootSum sql.NullInt64 + CommitID int64 + GroupKey []byte + ChainTxnID int64 + OutputIndex sql.NullInt32 + InternalKeyID int64 + OutputKey []byte + BlockHeader []byte + BlockHeight sql.NullInt32 + MerkleProof []byte + SupplyRootHash []byte + SupplyRootSum sql.NullInt64 + SpentCommitment sql.NullInt64 +} + +type SupplySyncerPushLog struct { + ID int64 + GroupKey []byte + MaxPushedBlockHeight int32 + ServerAddress string + CommitTxid []byte + OutputIndex int32 + NumLeavesPushed int32 + CreatedAt int64 } type SupplyUpdateEvent struct { diff --git a/tapdb/sqlc/querier.go b/tapdb/sqlc/querier.go index 08c71b1b3..c9cc85d3b 100644 --- a/tapdb/sqlc/querier.go +++ b/tapdb/sqlc/querier.go @@ -103,6 +103,9 @@ type Querier interface { FetchSeedlingID(ctx context.Context, arg FetchSeedlingIDParams) (int64, error) FetchSeedlingsForBatch(ctx context.Context, rawKey []byte) ([]FetchSeedlingsForBatchRow, error) FetchSupplyCommit(ctx context.Context, groupKey []byte) (FetchSupplyCommitRow, error) + // Fetches all push log entries for a given asset group, ordered by + // creation time with the most recent entries first. 
+ FetchSupplySyncerPushLogs(ctx context.Context, groupKey []byte) ([]SupplySyncerPushLog, error) // Sort the nodes by node_index here instead of returning the indices. FetchTapscriptTree(ctx context.Context, rootHash []byte) ([]FetchTapscriptTreeRow, error) FetchTransferInputs(ctx context.Context, transferID int64) ([]FetchTransferInputsRow, error) @@ -136,6 +139,10 @@ type Querier interface { InsertRootKey(ctx context.Context, arg InsertRootKeyParams) error InsertSupplyCommitTransition(ctx context.Context, arg InsertSupplyCommitTransitionParams) (int64, error) InsertSupplyCommitment(ctx context.Context, arg InsertSupplyCommitmentParams) (int64, error) + // Inserts a new push log entry to track a successful supply commitment + // push to a remote universe server. The commit_txid and output_index are + // taken directly from the RootCommitment outpoint. + InsertSupplySyncerPushLog(ctx context.Context, arg InsertSupplySyncerPushLogParams) error InsertSupplyUpdateEvent(ctx context.Context, arg InsertSupplyUpdateEventParams) error InsertTxProof(ctx context.Context, arg InsertTxProofParams) error InsertUniverseServer(ctx context.Context, arg InsertUniverseServerParams) error @@ -181,8 +188,12 @@ type Querier interface { QueryPassiveAssets(ctx context.Context, transferID int64) ([]QueryPassiveAssetsRow, error) QueryPendingSupplyCommitTransition(ctx context.Context, groupKey []byte) (QueryPendingSupplyCommitTransitionRow, error) QueryProofTransferAttempts(ctx context.Context, arg QueryProofTransferAttemptsParams) ([]time.Time, error) + QueryStartingSupplyCommitment(ctx context.Context, groupKey []byte) (QueryStartingSupplyCommitmentRow, error) QuerySupplyCommitStateMachine(ctx context.Context, groupKey []byte) (QuerySupplyCommitStateMachineRow, error) - QuerySupplyCommitment(ctx context.Context, commitID int64) (SupplyCommitment, error) + QuerySupplyCommitment(ctx context.Context, commitID int64) (QuerySupplyCommitmentRow, error) + QuerySupplyCommitmentByOutpoint(ctx 
context.Context, arg QuerySupplyCommitmentByOutpointParams) (QuerySupplyCommitmentByOutpointRow, error) + QuerySupplyCommitmentBySpentOutpoint(ctx context.Context, arg QuerySupplyCommitmentBySpentOutpointParams) (QuerySupplyCommitmentBySpentOutpointRow, error) + QuerySupplyCommitmentOutpoint(ctx context.Context, commitID int64) (QuerySupplyCommitmentOutpointRow, error) QuerySupplyLeavesByHeight(ctx context.Context, arg QuerySupplyLeavesByHeightParams) ([]QuerySupplyLeavesByHeightRow, error) QuerySupplyUpdateEvents(ctx context.Context, transitionID sql.NullInt64) ([]QuerySupplyUpdateEventsRow, error) // TODO(roasbeef): use the universe id instead for the grouping? so namespace diff --git a/tapdb/sqlc/queries/supply_commit.sql b/tapdb/sqlc/queries/supply_commit.sql index bb6291e7a..8cccddc73 100644 --- a/tapdb/sqlc/queries/supply_commit.sql +++ b/tapdb/sqlc/queries/supply_commit.sql @@ -30,11 +30,11 @@ RETURNING current_state_id, latest_commitment_id; -- name: InsertSupplyCommitment :one INSERT INTO supply_commitments ( group_key, chain_txn_id, - output_index, internal_key_id, output_key, -- Core fields + output_index, internal_key_id, output_key, spent_commitment, -- Core fields block_height, block_header, merkle_proof, -- Nullable chain details supply_root_hash, supply_root_sum -- Nullable root details ) VALUES ( - $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11 ) RETURNING commit_id; -- name: UpdateSupplyCommitmentChainDetails :exec @@ -134,10 +134,52 @@ SET transition_id = @transition_id WHERE group_key = @group_key AND transition_id IS NULL; -- name: QuerySupplyCommitment :one -SELECT * -FROM supply_commitments +SELECT sqlc.embed(sc), ct.tx_index +FROM supply_commitments AS sc +JOIN chain_txns AS ct + ON sc.chain_txn_id = ct.txn_id WHERE commit_id = @commit_id; +-- name: QuerySupplyCommitmentByOutpoint :one +SELECT sqlc.embed(sc), ct.tx_index +FROM supply_commitments AS sc +JOIN chain_txns AS ct + ON sc.chain_txn_id = 
ct.txn_id +WHERE sc.group_key = @group_key AND + sc.output_index = @output_index AND + ct.txid = @txid; + +-- name: QuerySupplyCommitmentBySpentOutpoint :one +WITH spent_commitment AS ( + SELECT ssc.commit_id + FROM supply_commitments AS ssc + JOIN chain_txns AS ct + ON ssc.chain_txn_id = ct.txn_id + WHERE ssc.group_key = @group_key AND + ssc.output_index = @output_index AND + ct.txid = @txid +) +SELECT sqlc.embed(sc), ct.tx_index +FROM supply_commitments AS sc + JOIN chain_txns AS ct + ON sc.chain_txn_id = ct.txn_id +WHERE sc.spent_commitment = (SELECT commit_id FROM spent_commitment); + +-- name: QueryStartingSupplyCommitment :one +SELECT sqlc.embed(sc), ct.tx_index +FROM supply_commitments AS sc + JOIN chain_txns AS ct + ON sc.chain_txn_id = ct.txn_id +WHERE sc.spent_commitment IS NULL AND + sc.group_key = @group_key; + +-- name: QuerySupplyCommitmentOutpoint :one +SELECT ct.txid, sc.output_index +FROM supply_commitments AS sc + JOIN chain_txns AS ct + ON sc.chain_txn_id = ct.txn_id +WHERE sc.commit_id = @commit_id; + -- name: UpdateSupplyCommitTransitionCommitment :exec UPDATE supply_commit_transitions SET new_commitment_id = @new_commitment_id, @@ -181,17 +223,7 @@ WHERE outpoint = @outpoint -- name: FetchSupplyCommit :one SELECT - sc.commit_id, - sc.output_index, - sc.output_key, - sqlc.embed(ik), - txn.raw_tx, - txn.block_height, - txn.block_hash, - txn.tx_index, - txn.chain_fees, - sc.supply_root_hash AS root_hash, - sc.supply_root_sum AS root_sum + sqlc.embed(sc), txn.tx_index FROM supply_commit_state_machines sm JOIN supply_commitments sc ON sm.latest_commitment_id = sc.commit_id diff --git a/tapdb/sqlc/queries/supply_syncer.sql b/tapdb/sqlc/queries/supply_syncer.sql new file mode 100644 index 000000000..19365765d --- /dev/null +++ b/tapdb/sqlc/queries/supply_syncer.sql @@ -0,0 +1,20 @@ +-- name: InsertSupplySyncerPushLog :exec +-- Inserts a new push log entry to track a successful supply commitment +-- push to a remote universe server. 
The commit_txid and output_index are +-- taken directly from the RootCommitment outpoint. +INSERT INTO supply_syncer_push_log ( + group_key, max_pushed_block_height, server_address, + commit_txid, output_index, num_leaves_pushed, created_at +) VALUES ( + @group_key, @max_pushed_block_height, @server_address, + @commit_txid, @output_index, @num_leaves_pushed, @created_at +); + +-- name: FetchSupplySyncerPushLogs :many +-- Fetches all push log entries for a given asset group, ordered by +-- creation time with the most recent entries first. +SELECT id, group_key, max_pushed_block_height, server_address, + commit_txid, output_index, num_leaves_pushed, created_at +FROM supply_syncer_push_log +WHERE group_key = @group_key +ORDER BY created_at DESC; diff --git a/tapdb/sqlc/schemas/generated_schema.sql b/tapdb/sqlc/schemas/generated_schema.sql index d61be7eb6..0d686d16e 100644 --- a/tapdb/sqlc/schemas/generated_schema.sql +++ b/tapdb/sqlc/schemas/generated_schema.sql @@ -879,12 +879,52 @@ CREATE TABLE supply_commitments ( -- The root sum of the supply commitment at this snapshot. supply_root_sum BIGINT -); +, spent_commitment BIGINT + REFERENCES supply_commitments(commit_id)); CREATE INDEX supply_commitments_chain_txn_id_idx ON supply_commitments(chain_txn_id); CREATE INDEX supply_commitments_group_key_idx ON supply_commitments(group_key); +CREATE UNIQUE INDEX supply_commitments_outpoint_uk + ON supply_commitments(chain_txn_id, output_index); + +CREATE INDEX supply_commitments_spent_commitment_idx + ON supply_commitments(spent_commitment); + +CREATE TABLE supply_syncer_push_log ( + id INTEGER PRIMARY KEY, + + -- The tweaked group key identifying the asset group this push log belongs + -- to. This should match the group_key format used in universe_supply_roots. + group_key BLOB NOT NULL CHECK(length(group_key) = 33), + + -- The highest block height among all supply leaves in this push. 
+ max_pushed_block_height INTEGER NOT NULL, + + -- The server address (host:port) where the commitment was pushed. + server_address TEXT NOT NULL, + + -- The transaction ID (hash) of the supply commitment. + commit_txid BLOB NOT NULL CHECK(length(commit_txid) = 32), + + -- The supply commitment output index within the commitment transaction. + output_index INTEGER NOT NULL, + + -- The number of leaves included in this specific push (diff count between + -- last commitment and current commitment). + num_leaves_pushed INTEGER NOT NULL, + + -- The timestamp when this push log entry was created (unix timestamp in seconds). + created_at BIGINT NOT NULL +); + +CREATE INDEX supply_syncer_push_log_group_key_idx + ON supply_syncer_push_log(group_key); + +CREATE INDEX supply_syncer_push_log_server_address_idx + ON supply_syncer_push_log(server_address); + CREATE TABLE supply_update_events ( event_id INTEGER PRIMARY KEY, diff --git a/tapdb/sqlc/supply_commit.sql.go b/tapdb/sqlc/supply_commit.sql.go index efbac58f3..d33ae0104 100644 --- a/tapdb/sqlc/supply_commit.sql.go +++ b/tapdb/sqlc/supply_commit.sql.go @@ -69,17 +69,7 @@ func (q *Queries) FetchInternalKeyByID(ctx context.Context, keyID int64) (FetchI const FetchSupplyCommit = `-- name: FetchSupplyCommit :one SELECT - sc.commit_id, - sc.output_index, - sc.output_key, - ik.key_id, ik.raw_key, ik.key_family, ik.key_index, - txn.raw_tx, - txn.block_height, - txn.block_hash, - txn.tx_index, - txn.chain_fees, - sc.supply_root_hash AS root_hash, - sc.supply_root_sum AS root_sum + sc.commit_id, sc.group_key, sc.chain_txn_id, sc.output_index, sc.internal_key_id, sc.output_key, sc.block_header, sc.block_height, sc.merkle_proof, sc.supply_root_hash, sc.supply_root_sum, sc.spent_commitment, txn.tx_index FROM supply_commit_state_machines sm JOIN supply_commitments sc ON sm.latest_commitment_id = sc.commit_id @@ -93,37 +83,27 @@ WHERE ` type FetchSupplyCommitRow struct { - CommitID int64 - OutputIndex sql.NullInt32 - OutputKey []byte - 
InternalKey InternalKey - RawTx []byte - BlockHeight sql.NullInt32 - BlockHash []byte - TxIndex sql.NullInt32 - ChainFees int64 - RootHash []byte - RootSum sql.NullInt64 + SupplyCommitment SupplyCommitment + TxIndex sql.NullInt32 } func (q *Queries) FetchSupplyCommit(ctx context.Context, groupKey []byte) (FetchSupplyCommitRow, error) { row := q.db.QueryRowContext(ctx, FetchSupplyCommit, groupKey) var i FetchSupplyCommitRow err := row.Scan( - &i.CommitID, - &i.OutputIndex, - &i.OutputKey, - &i.InternalKey.KeyID, - &i.InternalKey.RawKey, - &i.InternalKey.KeyFamily, - &i.InternalKey.KeyIndex, - &i.RawTx, - &i.BlockHeight, - &i.BlockHash, + &i.SupplyCommitment.CommitID, + &i.SupplyCommitment.GroupKey, + &i.SupplyCommitment.ChainTxnID, + &i.SupplyCommitment.OutputIndex, + &i.SupplyCommitment.InternalKeyID, + &i.SupplyCommitment.OutputKey, + &i.SupplyCommitment.BlockHeader, + &i.SupplyCommitment.BlockHeight, + &i.SupplyCommitment.MerkleProof, + &i.SupplyCommitment.SupplyRootHash, + &i.SupplyCommitment.SupplyRootSum, + &i.SupplyCommitment.SpentCommitment, &i.TxIndex, - &i.ChainFees, - &i.RootHash, - &i.RootSum, ) return i, err } @@ -248,25 +228,26 @@ func (q *Queries) InsertSupplyCommitTransition(ctx context.Context, arg InsertSu const InsertSupplyCommitment = `-- name: InsertSupplyCommitment :one INSERT INTO supply_commitments ( group_key, chain_txn_id, - output_index, internal_key_id, output_key, -- Core fields + output_index, internal_key_id, output_key, spent_commitment, -- Core fields block_height, block_header, merkle_proof, -- Nullable chain details supply_root_hash, supply_root_sum -- Nullable root details ) VALUES ( - $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11 ) RETURNING commit_id ` type InsertSupplyCommitmentParams struct { - GroupKey []byte - ChainTxnID int64 - OutputIndex sql.NullInt32 - InternalKeyID int64 - OutputKey []byte - BlockHeight sql.NullInt32 - BlockHeader []byte - MerkleProof []byte - SupplyRootHash 
[]byte - SupplyRootSum sql.NullInt64 + GroupKey []byte + ChainTxnID int64 + OutputIndex sql.NullInt32 + InternalKeyID int64 + OutputKey []byte + SpentCommitment sql.NullInt64 + BlockHeight sql.NullInt32 + BlockHeader []byte + MerkleProof []byte + SupplyRootHash []byte + SupplyRootSum sql.NullInt64 } func (q *Queries) InsertSupplyCommitment(ctx context.Context, arg InsertSupplyCommitmentParams) (int64, error) { @@ -276,6 +257,7 @@ func (q *Queries) InsertSupplyCommitment(ctx context.Context, arg InsertSupplyCo arg.OutputIndex, arg.InternalKeyID, arg.OutputKey, + arg.SpentCommitment, arg.BlockHeight, arg.BlockHeader, arg.MerkleProof, @@ -447,6 +429,41 @@ func (q *Queries) QueryPendingSupplyCommitTransition(ctx context.Context, groupK return i, err } +const QueryStartingSupplyCommitment = `-- name: QueryStartingSupplyCommitment :one +SELECT sc.commit_id, sc.group_key, sc.chain_txn_id, sc.output_index, sc.internal_key_id, sc.output_key, sc.block_header, sc.block_height, sc.merkle_proof, sc.supply_root_hash, sc.supply_root_sum, sc.spent_commitment, ct.tx_index +FROM supply_commitments AS sc + JOIN chain_txns AS ct + ON sc.chain_txn_id = ct.txn_id +WHERE sc.spent_commitment IS NULL AND + sc.group_key = $1 +` + +type QueryStartingSupplyCommitmentRow struct { + SupplyCommitment SupplyCommitment + TxIndex sql.NullInt32 +} + +func (q *Queries) QueryStartingSupplyCommitment(ctx context.Context, groupKey []byte) (QueryStartingSupplyCommitmentRow, error) { + row := q.db.QueryRowContext(ctx, QueryStartingSupplyCommitment, groupKey) + var i QueryStartingSupplyCommitmentRow + err := row.Scan( + &i.SupplyCommitment.CommitID, + &i.SupplyCommitment.GroupKey, + &i.SupplyCommitment.ChainTxnID, + &i.SupplyCommitment.OutputIndex, + &i.SupplyCommitment.InternalKeyID, + &i.SupplyCommitment.OutputKey, + &i.SupplyCommitment.BlockHeader, + &i.SupplyCommitment.BlockHeight, + &i.SupplyCommitment.MerkleProof, + &i.SupplyCommitment.SupplyRootHash, + &i.SupplyCommitment.SupplyRootSum, + 
&i.SupplyCommitment.SpentCommitment, + &i.TxIndex, + ) + return i, err +} + const QuerySupplyCommitStateMachine = `-- name: QuerySupplyCommitStateMachine :one SELECT sm.group_key, @@ -479,30 +496,150 @@ func (q *Queries) QuerySupplyCommitStateMachine(ctx context.Context, groupKey [] } const QuerySupplyCommitment = `-- name: QuerySupplyCommitment :one -SELECT commit_id, group_key, chain_txn_id, output_index, internal_key_id, output_key, block_header, block_height, merkle_proof, supply_root_hash, supply_root_sum -FROM supply_commitments +SELECT sc.commit_id, sc.group_key, sc.chain_txn_id, sc.output_index, sc.internal_key_id, sc.output_key, sc.block_header, sc.block_height, sc.merkle_proof, sc.supply_root_hash, sc.supply_root_sum, sc.spent_commitment, ct.tx_index +FROM supply_commitments AS sc +JOIN chain_txns AS ct + ON sc.chain_txn_id = ct.txn_id WHERE commit_id = $1 ` -func (q *Queries) QuerySupplyCommitment(ctx context.Context, commitID int64) (SupplyCommitment, error) { +type QuerySupplyCommitmentRow struct { + SupplyCommitment SupplyCommitment + TxIndex sql.NullInt32 +} + +func (q *Queries) QuerySupplyCommitment(ctx context.Context, commitID int64) (QuerySupplyCommitmentRow, error) { row := q.db.QueryRowContext(ctx, QuerySupplyCommitment, commitID) - var i SupplyCommitment + var i QuerySupplyCommitmentRow err := row.Scan( - &i.CommitID, - &i.GroupKey, - &i.ChainTxnID, - &i.OutputIndex, - &i.InternalKeyID, - &i.OutputKey, - &i.BlockHeader, - &i.BlockHeight, - &i.MerkleProof, - &i.SupplyRootHash, - &i.SupplyRootSum, + &i.SupplyCommitment.CommitID, + &i.SupplyCommitment.GroupKey, + &i.SupplyCommitment.ChainTxnID, + &i.SupplyCommitment.OutputIndex, + &i.SupplyCommitment.InternalKeyID, + &i.SupplyCommitment.OutputKey, + &i.SupplyCommitment.BlockHeader, + &i.SupplyCommitment.BlockHeight, + &i.SupplyCommitment.MerkleProof, + &i.SupplyCommitment.SupplyRootHash, + &i.SupplyCommitment.SupplyRootSum, + &i.SupplyCommitment.SpentCommitment, + &i.TxIndex, + ) + return i, err 
+} + +const QuerySupplyCommitmentByOutpoint = `-- name: QuerySupplyCommitmentByOutpoint :one +SELECT sc.commit_id, sc.group_key, sc.chain_txn_id, sc.output_index, sc.internal_key_id, sc.output_key, sc.block_header, sc.block_height, sc.merkle_proof, sc.supply_root_hash, sc.supply_root_sum, sc.spent_commitment, ct.tx_index +FROM supply_commitments AS sc +JOIN chain_txns AS ct + ON sc.chain_txn_id = ct.txn_id +WHERE sc.group_key = $1 AND + sc.output_index = $2 AND + ct.txid = $3 +` + +type QuerySupplyCommitmentByOutpointParams struct { + GroupKey []byte + OutputIndex sql.NullInt32 + Txid []byte +} + +type QuerySupplyCommitmentByOutpointRow struct { + SupplyCommitment SupplyCommitment + TxIndex sql.NullInt32 +} + +func (q *Queries) QuerySupplyCommitmentByOutpoint(ctx context.Context, arg QuerySupplyCommitmentByOutpointParams) (QuerySupplyCommitmentByOutpointRow, error) { + row := q.db.QueryRowContext(ctx, QuerySupplyCommitmentByOutpoint, arg.GroupKey, arg.OutputIndex, arg.Txid) + var i QuerySupplyCommitmentByOutpointRow + err := row.Scan( + &i.SupplyCommitment.CommitID, + &i.SupplyCommitment.GroupKey, + &i.SupplyCommitment.ChainTxnID, + &i.SupplyCommitment.OutputIndex, + &i.SupplyCommitment.InternalKeyID, + &i.SupplyCommitment.OutputKey, + &i.SupplyCommitment.BlockHeader, + &i.SupplyCommitment.BlockHeight, + &i.SupplyCommitment.MerkleProof, + &i.SupplyCommitment.SupplyRootHash, + &i.SupplyCommitment.SupplyRootSum, + &i.SupplyCommitment.SpentCommitment, + &i.TxIndex, + ) + return i, err +} + +const QuerySupplyCommitmentBySpentOutpoint = `-- name: QuerySupplyCommitmentBySpentOutpoint :one +WITH spent_commitment AS ( + SELECT ssc.commit_id + FROM supply_commitments AS ssc + JOIN chain_txns AS ct + ON ssc.chain_txn_id = ct.txn_id + WHERE ssc.group_key = $1 AND + ssc.output_index = $2 AND + ct.txid = $3 +) +SELECT sc.commit_id, sc.group_key, sc.chain_txn_id, sc.output_index, sc.internal_key_id, sc.output_key, sc.block_header, sc.block_height, sc.merkle_proof, 
sc.supply_root_hash, sc.supply_root_sum, sc.spent_commitment, ct.tx_index +FROM supply_commitments AS sc + JOIN chain_txns AS ct + ON sc.chain_txn_id = ct.txn_id +WHERE sc.spent_commitment = (SELECT commit_id FROM spent_commitment) +` + +type QuerySupplyCommitmentBySpentOutpointParams struct { + GroupKey []byte + OutputIndex sql.NullInt32 + Txid []byte +} + +type QuerySupplyCommitmentBySpentOutpointRow struct { + SupplyCommitment SupplyCommitment + TxIndex sql.NullInt32 +} + +func (q *Queries) QuerySupplyCommitmentBySpentOutpoint(ctx context.Context, arg QuerySupplyCommitmentBySpentOutpointParams) (QuerySupplyCommitmentBySpentOutpointRow, error) { + row := q.db.QueryRowContext(ctx, QuerySupplyCommitmentBySpentOutpoint, arg.GroupKey, arg.OutputIndex, arg.Txid) + var i QuerySupplyCommitmentBySpentOutpointRow + err := row.Scan( + &i.SupplyCommitment.CommitID, + &i.SupplyCommitment.GroupKey, + &i.SupplyCommitment.ChainTxnID, + &i.SupplyCommitment.OutputIndex, + &i.SupplyCommitment.InternalKeyID, + &i.SupplyCommitment.OutputKey, + &i.SupplyCommitment.BlockHeader, + &i.SupplyCommitment.BlockHeight, + &i.SupplyCommitment.MerkleProof, + &i.SupplyCommitment.SupplyRootHash, + &i.SupplyCommitment.SupplyRootSum, + &i.SupplyCommitment.SpentCommitment, + &i.TxIndex, ) return i, err } +const QuerySupplyCommitmentOutpoint = `-- name: QuerySupplyCommitmentOutpoint :one +SELECT ct.txid, sc.output_index +FROM supply_commitments AS sc + JOIN chain_txns AS ct + ON sc.chain_txn_id = ct.txn_id +WHERE sc.commit_id = $1 +` + +type QuerySupplyCommitmentOutpointRow struct { + Txid []byte + OutputIndex sql.NullInt32 +} + +func (q *Queries) QuerySupplyCommitmentOutpoint(ctx context.Context, commitID int64) (QuerySupplyCommitmentOutpointRow, error) { + row := q.db.QueryRowContext(ctx, QuerySupplyCommitmentOutpoint, commitID) + var i QuerySupplyCommitmentOutpointRow + err := row.Scan(&i.Txid, &i.OutputIndex) + return i, err +} + const QuerySupplyUpdateEvents = `-- name: QuerySupplyUpdateEvents 
:many SELECT ue.event_id, diff --git a/tapdb/sqlc/supply_syncer.sql.go b/tapdb/sqlc/supply_syncer.sql.go new file mode 100644 index 000000000..8eefd56e4 --- /dev/null +++ b/tapdb/sqlc/supply_syncer.sql.go @@ -0,0 +1,88 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: supply_syncer.sql + +package sqlc + +import ( + "context" +) + +const FetchSupplySyncerPushLogs = `-- name: FetchSupplySyncerPushLogs :many +SELECT id, group_key, max_pushed_block_height, server_address, + commit_txid, output_index, num_leaves_pushed, created_at +FROM supply_syncer_push_log +WHERE group_key = $1 +ORDER BY created_at DESC +` + +// Fetches all push log entries for a given asset group, ordered by +// creation time with the most recent entries first. +func (q *Queries) FetchSupplySyncerPushLogs(ctx context.Context, groupKey []byte) ([]SupplySyncerPushLog, error) { + rows, err := q.db.QueryContext(ctx, FetchSupplySyncerPushLogs, groupKey) + if err != nil { + return nil, err + } + defer rows.Close() + var items []SupplySyncerPushLog + for rows.Next() { + var i SupplySyncerPushLog + if err := rows.Scan( + &i.ID, + &i.GroupKey, + &i.MaxPushedBlockHeight, + &i.ServerAddress, + &i.CommitTxid, + &i.OutputIndex, + &i.NumLeavesPushed, + &i.CreatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const InsertSupplySyncerPushLog = `-- name: InsertSupplySyncerPushLog :exec +INSERT INTO supply_syncer_push_log ( + group_key, max_pushed_block_height, server_address, + commit_txid, output_index, num_leaves_pushed, created_at +) VALUES ( + $1, $2, $3, + $4, $5, $6, $7 +) +` + +type InsertSupplySyncerPushLogParams struct { + GroupKey []byte + MaxPushedBlockHeight int32 + ServerAddress string + CommitTxid []byte + OutputIndex int32 + NumLeavesPushed int32 + CreatedAt int64 +} + +// Inserts a new push 
log entry to track a successful supply commitment +// push to a remote universe server. The commit_txid and output_index are +// taken directly from the RootCommitment outpoint. +func (q *Queries) InsertSupplySyncerPushLog(ctx context.Context, arg InsertSupplySyncerPushLogParams) error { + _, err := q.db.ExecContext(ctx, InsertSupplySyncerPushLog, + arg.GroupKey, + arg.MaxPushedBlockHeight, + arg.ServerAddress, + arg.CommitTxid, + arg.OutputIndex, + arg.NumLeavesPushed, + arg.CreatedAt, + ) + return err +} diff --git a/tapdb/supply_commit.go b/tapdb/supply_commit.go index c2f643be7..033bb8788 100644 --- a/tapdb/supply_commit.go +++ b/tapdb/supply_commit.go @@ -18,18 +18,11 @@ import ( "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapdb/sqlc" "github.com/lightninglabs/taproot-assets/universe/supplycommit" + "github.com/lightninglabs/taproot-assets/universe/supplyverifier" lfn "github.com/lightningnetwork/lnd/fn/v2" "github.com/lightningnetwork/lnd/lnutils" ) -// commitmentChainInfo holds optional chain confirmation details for a -// commitment. -type commitmentChainInfo struct { - BlockHeader *wire.BlockHeader - MerkleProof *proof.TxMerkleProof - BlockHeight uint32 -} - type ( // UnspentPrecommits is an alias for the sqlc type representing an // unspent pre-commitment row. @@ -145,9 +138,8 @@ type SupplyCommitStore interface { arg InsertSupplyUpdateEvent) error // UpsertChainTx upserts a chain transaction. - UpsertChainTx( - ctx context.Context, arg UpsertChainTxParams, - ) (int64, error) + UpsertChainTx(ctx context.Context, + arg UpsertChainTxParams) (int64, error) // UpdateSupplyCommitTransitionCommitment updates the pending commit tx // ID for a @@ -179,7 +171,29 @@ type SupplyCommitStore interface { // QuerySupplyCommitment fetches a specific supply commitment by ID. 
QuerySupplyCommitment(ctx context.Context, - commitID int64) (sqlc.SupplyCommitment, error) + commitID int64) (sqlc.QuerySupplyCommitmentRow, error) + + // QuerySupplyCommitmentByOutpoint fetches a supply commitment by its + // outpoint. + QuerySupplyCommitmentByOutpoint(ctx context.Context, + arg sqlc.QuerySupplyCommitmentByOutpointParams) ( + sqlc.QuerySupplyCommitmentByOutpointRow, error) + + // QuerySupplyCommitmentBySpentOutpoint fetches a supply commitment by + // its spent outpoint. + QuerySupplyCommitmentBySpentOutpoint(ctx context.Context, + arg sqlc.QuerySupplyCommitmentBySpentOutpointParams) ( + sqlc.QuerySupplyCommitmentBySpentOutpointRow, error) + + // QueryStartingSupplyCommitment fetches the very first supply + // commitment of an asset group. + QueryStartingSupplyCommitment(ctx context.Context, + groupKey []byte) (sqlc.QueryStartingSupplyCommitmentRow, error) + + // QuerySupplyCommitmentOutpoint fetches the outpoint of a supply + // commitment by its ID. + QuerySupplyCommitmentOutpoint(ctx context.Context, + commitID int64) (sqlc.QuerySupplyCommitmentOutpointRow, error) // FetchChainTx fetches a chain transaction by its TXID. FetchChainTx(ctx context.Context, txid []byte) (ChainTxn, error) @@ -345,91 +359,15 @@ func (s *SupplyCommitMachine) SupplyCommit(ctx context.Context, err) } - internalKey, err := parseInternalKey(row.InternalKey) - if err != nil { - return fmt.Errorf("error parsing internal key: %w", err) - } - - outputKey, err := btcec.ParsePubKey(row.OutputKey) - if err != nil { - return fmt.Errorf("error parsing output key: %w", err) - } - - var commitTx wire.MsgTx - err = commitTx.Deserialize(bytes.NewReader(row.RawTx)) - if err != nil { - return fmt.Errorf("error deserializing commit tx: %w", - err) - } - - // Parse block related data from row if present. 
- var commitmentBlock fn.Option[supplycommit.CommitmentBlock] - if len(row.BlockHash) > 0 { - // Parse block height if present, otherwise return an - // error as it must be set if block hash is set. - if !row.BlockHeight.Valid { - return fmt.Errorf("block height must be set " + - "if block hash is set") - } - - blockHeight := uint32(row.BlockHeight.Int32) - - // Parse the block hash, which should be valid at this - // point. - blockHash, err := chainhash.NewHash(row.BlockHash) - if err != nil { - return fmt.Errorf("parsing block hash: %w", err) - } - - // Parse transaction block index which should be set - // if the block height is set. - if !row.TxIndex.Valid { - return fmt.Errorf("transaction index must be " + - "set if block height is set") - } - txIndex := uint32(row.TxIndex.Int32) - - commitmentBlock = fn.Some(supplycommit.CommitmentBlock{ - Hash: *blockHash, - Height: blockHeight, - TxIndex: txIndex, - ChainFees: row.ChainFees, - }) - } - - // Construct the root node directly from the stored hash and - // sum. Handle potential NULL values if the root wasn't set yet - // (though FetchSupplyCommit filters for confirmed TX, so it - // should be set). - var ( - rootHash mssmt.NodeHash - rootSum uint64 - rootNode *mssmt.BranchNode + rootCommitment, err := parseSupplyCommitmentRow( + ctx, row.SupplyCommitment, row.TxIndex, db, ) - if len(row.RootHash) != 0 && row.RootSum.Valid { - copy(rootHash[:], row.RootHash) - rootSum = uint64(row.RootSum.Int64) - rootNode = mssmt.NewComputedBranch(rootHash, rootSum) - } else { - // Should not happen due to query filter, but handle - // defensively. 
- log.Warnf("SupplyCommit: Fetched confirmed commit %d "+ - "but root hash/sum is NULL", row.CommitID) - - rootNode = mssmt.NewComputedBranch( - mssmt.EmptyTreeRootHash, 0, - ) + if err != nil { + return fmt.Errorf("failed to query commitment %d: %w", + row.SupplyCommitment.CommitID, err) } - rootCommitment := supplycommit.RootCommitment{ - Txn: &commitTx, - TxOutIdx: uint32(row.OutputIndex.Int32), - InternalKey: internalKey, - OutputKey: outputKey, - SupplyRoot: rootNode, - CommitmentBlock: commitmentBlock, - } - rootCommitmentOpt = lfn.Some(rootCommitment) + rootCommitmentOpt = lfn.Some(*rootCommitment) return nil }) @@ -845,8 +783,8 @@ func (s *SupplyCommitMachine) BindDanglingUpdatesToTransition( // InsertSignedCommitTx associates a new signed commitment anchor transaction // with the current active supply commitment state transition. func (s *SupplyCommitMachine) InsertSignedCommitTx(ctx context.Context, - assetSpec asset.Specifier, commitDetails supplycommit.SupplyCommitTxn, -) error { + assetSpec asset.Specifier, + commitDetails supplycommit.SupplyCommitTxn) error { groupKey := assetSpec.UnwrapGroupKeyToPtr() if groupKey == nil { @@ -905,24 +843,24 @@ func (s *SupplyCommitMachine) InsertSignedCommitTx(ctx context.Context, KeyIndex: int32(internalKeyDesc.Index), }) if err != nil { - return fmt.Errorf("failed to upsert "+ - "internal key %x: %w", + return fmt.Errorf("error upserting internal key %x: %w", internalKeyDesc.PubKey.SerializeCompressed(), err) } // Insert the new commitment record. Chain details (block // height, header, proof, output index) are NULL at this stage. 
- //nolint:lll - newCommitmentID, err := db.InsertSupplyCommitment(ctx, sqlc.InsertSupplyCommitmentParams{ - GroupKey: groupKeyBytes, - ChainTxnID: chainTxID, - InternalKeyID: internalKeyID, - OutputKey: outputKey.SerializeCompressed(), - SupplyRootHash: nil, - SupplyRootSum: sql.NullInt64{}, - OutputIndex: sqlInt32(outputIndex), - }) + params := sqlc.InsertSupplyCommitmentParams{ + GroupKey: groupKeyBytes, + ChainTxnID: chainTxID, + InternalKeyID: internalKeyID, + OutputKey: outputKey.SerializeCompressed(), + SupplyRootHash: nil, + SupplyRootSum: sql.NullInt64{}, + OutputIndex: sqlInt32(outputIndex), + SpentCommitment: pendingTransition.OldCommitmentID, + } + newCommitmentID, err := db.InsertSupplyCommitment(ctx, params) if err != nil { return fmt.Errorf("failed to insert new supply "+ "commitment: %w", err) @@ -969,6 +907,200 @@ func (s *SupplyCommitMachine) InsertSignedCommitTx(ctx context.Context, }) } +// InsertSupplyCommit inserts a new, fully complete supply commitment into the +// database. +func (s *SupplyCommitMachine) InsertSupplyCommit(ctx context.Context, + assetSpec asset.Specifier, commit supplycommit.RootCommitment, + leaves supplycommit.SupplyLeaves) error { + + groupKey := assetSpec.UnwrapGroupKeyToPtr() + if groupKey == nil { + return ErrMissingGroupKey + } + groupKeyBytes := groupKey.SerializeCompressed() + + commitTx := commit.Txn + internalKey := commit.InternalKey + outputKey := commit.OutputKey + outputIndex := commit.TxOutIdx + + block, err := commit.CommitmentBlock.UnwrapOrErr( + supplycommit.ErrNoBlockInfo, + ) + if err != nil { + return fmt.Errorf("failed to unwrap commitment block: %w", err) + } + + writeTx := WriteTxOption() + return s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { + // Next, we'll upsert the chain transaction on disk. The block + // related fields are nil as this hasn't been confirmed yet. 
+ var txBytes bytes.Buffer + if err := commitTx.Serialize(&txBytes); err != nil { + return fmt.Errorf("failed to serialize commit "+ + "tx: %w", err) + } + txid := commitTx.TxHash() + chainTxID, err := db.UpsertChainTx(ctx, UpsertChainTxParams{ + Txid: txid[:], + RawTx: txBytes.Bytes(), + }) + if err != nil { + return fmt.Errorf("failed to upsert commit chain tx: "+ + "%w", err) + } + + // Upsert the internal key to get its ID. We assume key family + // and index 0 for now, as this key is likely externally. + internalKeyID, err := db.UpsertInternalKey(ctx, InternalKey{ + RawKey: internalKey.PubKey.SerializeCompressed(), + KeyFamily: int32(internalKey.Family), + KeyIndex: int32(internalKey.Index), + }) + if err != nil { + return fmt.Errorf("failed to upsert internal key %x: "+ + "%w", internalKey.PubKey.SerializeCompressed(), + err) + } + + // Now we fetch the previous commitment that is being spent by + // this one. + var spentCommitment sql.NullInt64 + err = fn.MapOptionZ( + commit.SpentCommitment, func(op wire.OutPoint) error { + q := sqlc.QuerySupplyCommitmentByOutpointParams{ + GroupKey: groupKeyBytes, + Txid: op.Hash[:], + OutputIndex: sqlInt32(op.Index), + } + row, err := db.QuerySupplyCommitmentByOutpoint( + ctx, q, + ) + if err != nil { + return fmt.Errorf("failed to query "+ + "spent commitment: %w", err) + } + + spentCommitment = sqlInt64( + row.SupplyCommitment.CommitID, + ) + + return nil + }, + ) + if err != nil { + return fmt.Errorf("failed to fetch spent commitment: "+ + "%w", err) + } + + // Insert the new commitment record. Chain details (block + // height, header, proof, output index) are NULL at this stage. 
+ params := sqlc.InsertSupplyCommitmentParams{ + GroupKey: groupKeyBytes, + ChainTxnID: chainTxID, + InternalKeyID: internalKeyID, + OutputKey: outputKey.SerializeCompressed(), + SupplyRootHash: nil, + SupplyRootSum: sql.NullInt64{}, + OutputIndex: sqlInt32(outputIndex), + SpentCommitment: spentCommitment, + } + newCommitmentID, err := db.InsertSupplyCommitment(ctx, params) + if err != nil { + return fmt.Errorf("failed to insert new supply "+ + "commitment: %w", err) + } + + // Update the commitment record with the calculated root hash + // and sum. + finalRootSupplyRoot, err := applySupplyUpdatesInternal( + ctx, db, assetSpec, leaves.AllUpdates(), + ) + if err != nil { + return fmt.Errorf("failed to apply SMT updates: "+ + "%w", err) + } + finalRootHash := finalRootSupplyRoot.NodeHash() + finalRootSum := finalRootSupplyRoot.NodeSum() + err = db.UpdateSupplyCommitmentRoot( + ctx, UpdateSupplyCommitmentRootParams{ + CommitID: newCommitmentID, + SupplyRootHash: finalRootHash[:], + SupplyRootSum: sqlInt64(int64(finalRootSum)), + }, + ) + if err != nil { + return fmt.Errorf("failed to update commitment root "+ + "hash/sum for commit %d: %w", + newCommitmentID, err) + } + + // Next, we'll serialize the merkle proofs and block header, so + // we can update them on disk. + var ( + proofBuf bytes.Buffer + headerBuf bytes.Buffer + ) + + err = block.MerkleProof.Encode(&proofBuf) + if err != nil { + return fmt.Errorf("failed to encode "+ + "merkle proof: %w", err) + } + err = block.BlockHeader.Serialize(&headerBuf) + if err != nil { + return fmt.Errorf("failed to "+ + "serialize block header: %w", + err) + } + blockHeight := sqlInt32(block.Height) + + // With all the information serialized above, we'll now update + // the chain proof information for this current supply commit. 
+ err = db.UpdateSupplyCommitmentChainDetails( + ctx, SupplyCommitChainDetails{ + CommitID: newCommitmentID, + MerkleProof: proofBuf.Bytes(), + OutputIndex: sqlInt32(commit.TxOutIdx), + BlockHeader: headerBuf.Bytes(), + ChainTxnID: chainTxID, + BlockHeight: blockHeight, + }, + ) + if err != nil { + return fmt.Errorf("failed to update commitment chain "+ + "details: %w", err) + } + + // Also update the chain_txns record itself with the + // confirmation details (block hash, height, index). + var commitTxBytes bytes.Buffer + err = commit.Txn.Serialize(&commitTxBytes) + if err != nil { + return fmt.Errorf("failed to serialize commit tx for "+ + "update: %w", err) + } + commitTxid := commit.Txn.TxHash() + + _, err = db.UpsertChainTx(ctx, UpsertChainTxParams{ + Txid: commitTxid[:], + RawTx: commitTxBytes.Bytes(), + ChainFees: 0, + BlockHash: lnutils.ByteSlice( + block.BlockHeader.BlockHash(), + ), + BlockHeight: blockHeight, + TxIndex: sqlInt32(block.TxIndex), + }) + if err != nil { + return fmt.Errorf("failed to update chain_txns "+ + "confirmation: %w", err) + } + + return nil + }) +} + // CommitState commits the state of the state machine to disk. func (s *SupplyCommitMachine) CommitState(ctx context.Context, assetSpec asset.Specifier, state supplycommit.State) error { @@ -1003,36 +1135,206 @@ func (s *SupplyCommitMachine) CommitState(ctx context.Context, } // fetchCommitment is a helper to fetch and reconstruct a RootCommitment and -// its associated chain confirmation details. +// its associated chain confirmation details. If no commitment is found, +// it returns None for both the commitment and chain info. 
func fetchCommitment(ctx context.Context, db SupplyCommitStore, - commitID sql.NullInt64, groupKeyBytes []byte, -) (lfn.Option[supplycommit.RootCommitment], - lfn.Option[commitmentChainInfo], error) { + commitID sql.NullInt64) (lfn.Option[supplycommit.RootCommitment], + error) { noneRootCommit := lfn.None[supplycommit.RootCommitment]() - noneChainInfo := lfn.None[commitmentChainInfo]() if !commitID.Valid { - return noneRootCommit, noneChainInfo, nil + return noneRootCommit, nil } // First, fetch the supply commitment itself. - commit, err := db.QuerySupplyCommitment(ctx, commitID.Int64) + commitRow, err := db.QuerySupplyCommitment(ctx, commitID.Int64) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return noneRootCommit, noneChainInfo, nil + return noneRootCommit, nil + } + return noneRootCommit, fmt.Errorf("failed to query "+ + "commitment %d: %w", commitID.Int64, err) + } + + commit, err := parseSupplyCommitmentRow( + ctx, commitRow.SupplyCommitment, commitRow.TxIndex, db, + ) + if err != nil { + return noneRootCommit, fmt.Errorf("failed to query "+ + "commitment %d: %w", commitID.Int64, err) + } + + return lfn.Some(*commit), nil +} + +// FetchCommitmentByOutpoint fetches a supply commitment by its outpoint and +// group key. If no commitment is found, it returns ErrCommitmentNotFound. +func (s *SupplyCommitMachine) FetchCommitmentByOutpoint(ctx context.Context, + assetSpec asset.Specifier, + outpoint wire.OutPoint) (*supplycommit.RootCommitment, error) { + + groupKey := assetSpec.UnwrapGroupKeyToPtr() + if groupKey == nil { + return nil, ErrMissingGroupKey + } + + var ( + writeTx = WriteTxOption() + groupKeyBytes = groupKey.SerializeCompressed() + commit *supplycommit.RootCommitment + ) + dbErr := s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { + // First, fetch the supply commitment by group key and outpoint. 
+ commitRow, err := db.QuerySupplyCommitmentByOutpoint( + ctx, sqlc.QuerySupplyCommitmentByOutpointParams{ + GroupKey: groupKeyBytes, + OutputIndex: sqlInt32(outpoint.Index), + Txid: outpoint.Hash[:], + }, + ) + if err != nil { + return fmt.Errorf("failed to query commitment for "+ + "outpoint %s: %w", outpoint, err) + } + + commit, err = parseSupplyCommitmentRow( + ctx, commitRow.SupplyCommitment, commitRow.TxIndex, db, + ) + if err != nil { + return fmt.Errorf("failed to parse commitment for "+ + "outpoint %s: %w", outpoint, err) + } + + return nil + }) + if dbErr != nil { + if errors.Is(dbErr, sql.ErrNoRows) { + return nil, supplyverifier.ErrCommitmentNotFound + } + + return nil, fmt.Errorf("failed to fetch commitment by "+ + "outpoint %s: %w", outpoint, dbErr) + } + + return commit, nil +} + +// FetchCommitmentBySpentOutpoint fetches a supply commitment by the outpoint it +// spent and group key. If no commitment is found, it returns +// ErrCommitmentNotFound. +func (s *SupplyCommitMachine) FetchCommitmentBySpentOutpoint( + ctx context.Context, assetSpec asset.Specifier, + spentOutpoint wire.OutPoint) (*supplycommit.RootCommitment, error) { + + groupKey := assetSpec.UnwrapGroupKeyToPtr() + if groupKey == nil { + return nil, ErrMissingGroupKey + } + + var ( + writeTx = WriteTxOption() + groupKeyBytes = groupKey.SerializeCompressed() + commit *supplycommit.RootCommitment + ) + dbErr := s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { + // First, fetch the supply commitment by group key and outpoint. 
+ commitRow, err := db.QuerySupplyCommitmentBySpentOutpoint( + ctx, sqlc.QuerySupplyCommitmentBySpentOutpointParams{ + GroupKey: groupKeyBytes, + OutputIndex: sqlInt32(spentOutpoint.Index), + Txid: spentOutpoint.Hash[:], + }, + ) + if err != nil { + return fmt.Errorf("failed to query commitment for "+ + "spent outpoint %s: %w", spentOutpoint, err) + } + + commit, err = parseSupplyCommitmentRow( + ctx, commitRow.SupplyCommitment, commitRow.TxIndex, db, + ) + if err != nil { + return fmt.Errorf("failed to parse commitment for "+ + "spent outpoint %s: %w", spentOutpoint, err) + } + + return nil + }) + if dbErr != nil { + if errors.Is(dbErr, sql.ErrNoRows) { + return nil, supplyverifier.ErrCommitmentNotFound } - return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ - "query commitment %d: %w", commitID.Int64, err) + + return nil, fmt.Errorf("failed to fetch commitment by spent "+ + "outpoint %s: %w", spentOutpoint, dbErr) } + return commit, nil +} + +// FetchStartingCommitment fetches the very first supply commitment of an asset +// group. If no commitment is found, it returns ErrCommitmentNotFound. +func (s *SupplyCommitMachine) FetchStartingCommitment(ctx context.Context, + assetSpec asset.Specifier) (*supplycommit.RootCommitment, error) { + + groupKey := assetSpec.UnwrapGroupKeyToPtr() + if groupKey == nil { + return nil, ErrMissingGroupKey + } + + var ( + writeTx = WriteTxOption() + groupKeyBytes = groupKey.SerializeCompressed() + commit *supplycommit.RootCommitment + ) + dbErr := s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { + // First, fetch the supply commitment by group key. 
+ commitRow, err := db.QueryStartingSupplyCommitment( + ctx, groupKeyBytes, + ) + if err != nil { + return fmt.Errorf("failed to query starting "+ + "commitment for group %x: %w", groupKeyBytes, + err) + } + + commit, err = parseSupplyCommitmentRow( + ctx, commitRow.SupplyCommitment, commitRow.TxIndex, db, + ) + if err != nil { + return fmt.Errorf("failed to parse starting "+ + "commitment for group %x: %w", groupKeyBytes, + err) + } + + return nil + }) + if dbErr != nil { + if errors.Is(dbErr, sql.ErrNoRows) { + return nil, supplyverifier.ErrCommitmentNotFound + } + + return nil, fmt.Errorf("failed to fetch starting commitment "+ + "for group %x: %w", groupKeyBytes, dbErr) + } + + return commit, nil +} + +// parseSupplyCommitmentRow parses a SupplyCommitment row into a +// supplycommit.RootCommitment and optional commitmentChainInfo. +func parseSupplyCommitmentRow(ctx context.Context, commit SupplyCommitment, + txIndex sql.NullInt32, + db SupplyCommitStore) (*supplycommit.RootCommitment, error) { + internalKeyRow, err := db.FetchInternalKeyByID( ctx, commit.InternalKeyID, ) if err != nil { - return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ - "fetch internal key %d for commit %d: %w", - commit.InternalKeyID, commitID.Int64, err) + return nil, fmt.Errorf("failed to fetch internal key %d for "+ + "commit %d: %w", commit.InternalKeyID, commit.CommitID, + err) } internalKey, err := parseInternalKey(sqlc.InternalKey{ RawKey: internalKeyRow.RawKey, @@ -1040,39 +1342,36 @@ func fetchCommitment(ctx context.Context, db SupplyCommitStore, KeyIndex: internalKeyRow.KeyIndex, }) if err != nil { - return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ - "parse internal key for commit %d: %w", commitID.Int64, - err) + return nil, fmt.Errorf("failed to parse internal key for "+ + "commit %d: %w", commit.CommitID, err) } outputKey, err := btcec.ParsePubKey(commit.OutputKey) if err != nil { - return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ - 
"parse output key for commit %d: %w", commitID.Int64, - err) + return nil, fmt.Errorf("failed to parse output key for commit "+ + "%d: %w", commit.CommitID, err) } // Fetch and deserialize the transaction. var commitTx wire.MsgTx chainTxRow, err := db.FetchChainTxByID(ctx, commit.ChainTxnID) if err != nil { - return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ - "fetch chain tx %d for commit %d: %w", - commit.ChainTxnID, commitID.Int64, err) + return nil, fmt.Errorf("failed to fetch chain tx %d for "+ + "commit %d: %w", commit.ChainTxnID, commit.CommitID, + err) } err = commitTx.Deserialize(bytes.NewReader(chainTxRow.RawTx)) if err != nil { - return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ - "deserialize commit tx for commit %d: %w", - commitID.Int64, err) + return nil, fmt.Errorf("failed to deserialize commit tx for "+ + "commit %d: %w", commit.CommitID, err) } // Construct the SMT root node from the stored hash and sum. If they are // NULL (e.g., initial commit before ApplyStateTransition ran), use the // empty root. var rootNode *mssmt.BranchNode - if commit.SupplyRootHash == nil || !commit.SupplyRootSum.Valid { + if len(commit.SupplyRootHash) == 0 || !commit.SupplyRootSum.Valid { log.Warnf("fetchCommitment: Supply root hash/sum is NULL for "+ - "commit %d, using empty root", commitID.Int64) + "commit %d, using empty root", commit.CommitID) rootNode = mssmt.NewComputedBranch(mssmt.EmptyTreeRootHash, 0) } else { var rootHash mssmt.NodeHash @@ -1081,7 +1380,7 @@ func fetchCommitment(ctx context.Context, db SupplyCommitStore, rootNode = mssmt.NewComputedBranch(rootHash, rootSum) } - rootCommitment := supplycommit.RootCommitment{ + rootCommitment := &supplycommit.RootCommitment{ Txn: &commitTx, TxOutIdx: uint32(commit.OutputIndex.Int32), InternalKey: internalKey, @@ -1089,9 +1388,6 @@ func fetchCommitment(ctx context.Context, db SupplyCommitStore, SupplyRoot: rootNode, } - // Now, attempt to construct the chain info if confirmed. 
- var chainInfoOpt lfn.Option[commitmentChainInfo] - // If we have a valid block height, then that means that the block // header and/or merkle proof may also be present. if commit.BlockHeight.Valid { @@ -1107,7 +1403,7 @@ func fetchCommitment(ctx context.Context, db SupplyCommitStore, // Log error but don't fail the whole fetch log.Errorf("fetchCommitment: failed to "+ "deserialize block header "+ - "for commit %d: %v", commitID.Int64, + "for commit %d: %v", commit.CommitID, err) blockHeader = nil } @@ -1122,26 +1418,54 @@ func fetchCommitment(ctx context.Context, db SupplyCommitStore, if err != nil { log.Errorf("fetchCommitment: failed to "+ "decode merkle proof for commit %d: "+ - "%v", commitID.Int64, err) + "%v", commit.CommitID, err) merkleProof = nil } } if blockHeader != nil && merkleProof != nil { - chainInfoOpt = lfn.Some(commitmentChainInfo{ - BlockHeader: blockHeader, - MerkleProof: merkleProof, - BlockHeight: blockHeight, - }) + rootCommitment.CommitmentBlock = fn.Some( + supplycommit.CommitmentBlock{ + Height: blockHeight, + Hash: blockHeader.BlockHash(), + TxIndex: uint32(txIndex.Int32), + BlockHeader: blockHeader, + MerkleProof: merkleProof, + }, + ) } else { log.Warnf("fetchCommitment: commit %d has block "+ "height but missing header (%v) or proof (%v)", - commitID.Int64, blockHeader == nil, + commit.CommitID, blockHeader == nil, merkleProof == nil) } } - return lfn.Some(rootCommitment), chainInfoOpt, nil + if commit.SpentCommitment.Valid { + spentRow, err := db.QuerySupplyCommitmentOutpoint( + ctx, commit.SpentCommitment.Int64, + ) + if err != nil { + return nil, fmt.Errorf("failed to query spent "+ + "commitment with ID %d for commit %d: %w", + commit.SpentCommitment.Int64, commit.CommitID, + err) + } + + hash, err := chainhash.NewHash(spentRow.Txid) + if err != nil { + return nil, fmt.Errorf("failed to parse spent "+ + "commitment txid %x for commit %d: %w", + spentRow.Txid, commit.CommitID, err) + } + + rootCommitment.SpentCommitment = 
fn.Some(wire.OutPoint{ + Hash: *hash, + Index: uint32(spentRow.OutputIndex.Int32), + }) + } + + return rootCommitment, nil } // FetchState attempts to fetch the state of the state machine for the @@ -1237,37 +1561,44 @@ func (s *SupplyCommitMachine) FetchState(ctx context.Context, // Next, we'll fetch the old and new commitments. If this is the // very first state transition, there won't be an old // commitment. - oldCommitmentOpt, _, err = fetchCommitment( - ctx, db, dbTransition.OldCommitmentID, groupKeyBytes, + oldCommitmentOpt, err = fetchCommitment( + ctx, db, dbTransition.OldCommitmentID, ) if err != nil { return fmt.Errorf("failed fetching old "+ "commitment: %w", err) } - newCommitmentOpt, newCommitChainInfoOpt, err := fetchCommitment( - ctx, db, dbTransition.NewCommitmentID, groupKeyBytes, + newCommitmentOpt, err := fetchCommitment( + ctx, db, dbTransition.NewCommitmentID, ) if err != nil { return fmt.Errorf("failed fetching new "+ "commitment: %w", err) } - // Construct the ChainProof if the new commitment's chain info - // is present. 
- newCommitChainInfoOpt.WhenSome(func(info commitmentChainInfo) { - if info.BlockHeader != nil && info.MerkleProof != nil { - chainProofOpt = lfn.Some(supplycommit.ChainProof{ //nolint:lll - Header: *info.BlockHeader, - BlockHeight: info.BlockHeight, - MerkleProof: *info.MerkleProof, - }) - } - }) - newCommit = newCommitmentOpt.UnwrapOr( supplycommit.RootCommitment{}, ) + newCommit.CommitmentBlock.WhenSome( + func(b supplycommit.CommitmentBlock) { + if b.BlockHeader == nil || + b.MerkleProof == nil { + + return + } + + chainProofOpt = lfn.Some( + supplycommit.ChainProof{ + Header: *b.BlockHeader, + BlockHeight: b.Height, + MerkleProof: *b.MerkleProof, + TxIndex: b.TxIndex, + }, + ) + }, + ) + return nil }) if err != nil { @@ -1507,7 +1838,8 @@ func (s *SupplyCommitMachine) ApplyStateTransition( GroupKey: groupKeyBytes, StateName: sqlStr(defaultStateName), LatestCommitmentID: dbTransition.NewCommitmentID, //nolint:lll - }) + }, + ) if err != nil { return fmt.Errorf("failed to update state machine to "+ "default: %w", err) diff --git a/tapdb/supply_commit_test.go b/tapdb/supply_commit_test.go index 1489de4cb..095803451 100644 --- a/tapdb/supply_commit_test.go +++ b/tapdb/supply_commit_test.go @@ -18,6 +18,7 @@ import ( "github.com/lightninglabs/taproot-assets/mssmt" "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapdb/sqlc" + "github.com/lightninglabs/taproot-assets/universe" "github.com/lightninglabs/taproot-assets/universe/supplycommit" lfn "github.com/lightningnetwork/lnd/fn/v2" "github.com/lightningnetwork/lnd/keychain" @@ -606,27 +607,30 @@ func (h *supplyCommitTestHarness) fetchCommitmentByID( var commitment sqlc.SupplyCommitment readTx := ReadTxOption() - err := h.commitMachine.db.ExecTx(h.ctx, readTx, - func(db SupplyCommitStore) error { - var txErr error - commitment, txErr = db.QuerySupplyCommitment( - h.ctx, commitID, - ) - return txErr + err := h.commitMachine.db.ExecTx( + h.ctx, readTx, func(db SupplyCommitStore) 
error { + row, err := db.QuerySupplyCommitment(h.ctx, commitID) + if err != nil { + return err + } + + commitment = row.SupplyCommitment + + return nil }, ) return commitment, err } // fetchInternalKeyByID fetches an internal key by ID directly via SQL. -// -//nolint:lll -func (h *supplyCommitTestHarness) fetchInternalKeyByID(keyID int64) FetchInternalKeyByIDRow { +func (h *supplyCommitTestHarness) fetchInternalKeyByID( + keyID int64) FetchInternalKeyByIDRow { + h.t.Helper() var keyRow FetchInternalKeyByIDRow readTx := ReadTxOption() - err := h.commitMachine.db.ExecTx(h.ctx, readTx, - func(db SupplyCommitStore) error { + err := h.commitMachine.db.ExecTx( + h.ctx, readTx, func(db SupplyCommitStore) error { var txErr error keyRow, txErr = db.FetchInternalKeyByID(h.ctx, keyID) return txErr @@ -637,13 +641,13 @@ func (h *supplyCommitTestHarness) fetchInternalKeyByID(keyID int64) FetchInterna } // fetchChainTxByID fetches a chain tx by ID directly via SQL. -func (h *supplyCommitTestHarness) fetchChainTxByID(txID int64, -) (FetchChainTxByIDRow, error) { +func (h *supplyCommitTestHarness) fetchChainTxByID( + txID int64) (FetchChainTxByIDRow, error) { var chainTx FetchChainTxByIDRow readTx := ReadTxOption() - err := h.commitMachine.db.ExecTx(h.ctx, readTx, - func(db SupplyCommitStore) error { + err := h.commitMachine.db.ExecTx( + h.ctx, readTx, func(db SupplyCommitStore) error { var txErr error chainTx, txErr = db.FetchChainTxByID(h.ctx, txID) return txErr @@ -1944,17 +1948,7 @@ func TestSupplyCommitMachineFetch(t *testing.T) { require.False(t, commitOpt.IsNone()) // Fetch the commitment details directly for comparison. 
- var dbCommit sqlc.SupplyCommitment - readTx := ReadTxOption() - err = h.commitMachine.db.ExecTx( - h.ctx, readTx, func(dbtx SupplyCommitStore) error { - var txErr error - dbCommit, txErr = dbtx.QuerySupplyCommitment( - h.ctx, commitID1, - ) - return txErr - }, - ) + dbCommit, err := h.fetchCommitmentByID(commitID1) require.NoError(t, err) // We'll now assert that the populated commitment we just read matches @@ -2071,6 +2065,140 @@ func encodeTx(tx *wire.MsgTx) ([]byte, error) { return buf.Bytes(), err } +// TestSupplySyncerPushLog tests the LogSupplyCommitPush method which logs +// successful pushes to remote universe servers. +func TestSupplySyncerPushLog(t *testing.T) { + t.Parallel() + + // Set up the test harness with all necessary components. + h := newSupplyCommitTestHarness(t) + + // Create a test supply commitment that we can reference. + // Use the same simple approach as + // TestSupplyCommitMultipleSupplyCommitments. + genTxData := func() (int64, []byte, []byte) { + genesisPoint := test.RandOp(h.t) + tx := wire.NewMsgTx(2) + tx.AddTxIn(&wire.TxIn{ + PreviousOutPoint: genesisPoint, + }) + tx.AddTxOut(&wire.TxOut{ + Value: 1000, + PkScript: test.RandBytes(20), + }) + + txBytes, err := encodeTx(tx) + require.NoError(h.t, err) + txid := tx.TxHash() + chainTxID, err := h.db.UpsertChainTx( + h.ctx, sqlc.UpsertChainTxParams{ + Txid: txid[:], + RawTx: txBytes, + }, + ) + require.NoError(h.t, err) + return chainTxID, txid[:], txBytes + } + + chainTxID, txid, rawTx := genTxData() + commitID := h.addTestSupplyCommitment(chainTxID, txid, rawTx, false) + + // Get the supply root that was created by addTestSupplyCommitment. 
+ rows, err := h.db.(sqlc.DBTX).QueryContext(h.ctx, ` + SELECT supply_root_hash, supply_root_sum FROM supply_commitments + WHERE commit_id = $1 + `, commitID) + require.NoError(t, err) + defer rows.Close() + require.True(t, rows.Next(), "Expected supply commitment to exist") + + var ( + rootHashBytes []byte + rootSum int64 + ) + err = rows.Scan(&rootHashBytes, &rootSum) + require.NoError(t, err) + require.NoError(t, rows.Close()) + + var rootHash mssmt.NodeHash + copy(rootHash[:], rootHashBytes) + + // Decode the raw transaction to get the actual wire.MsgTx used in the + // test data. + var actualTx wire.MsgTx + err = actualTx.Deserialize(bytes.NewReader(rawTx)) + require.NoError(t, err) + + // Create a SupplySyncerStore and test the actual LogSupplyCommitPush + // method. + syncerStore := NewSupplySyncerStore(h.batchedTreeDB) + + // Create mock data for the method call. + serverAddr := universe.NewServerAddrFromStr("localhost:8080") + supplyRoot := mssmt.NewComputedBranch(rootHash, uint64(rootSum)) + + // Create minimal supply leaves - just need something to count. + // We need at least one leaf or the method returns early without + // logging. + mintEvent := supplycommit.NewMintEvent{ + MintHeight: 100, + } + leaves := supplycommit.SupplyLeaves{ + IssuanceLeafEntries: []supplycommit.NewMintEvent{mintEvent}, + } + + commitment := supplycommit.RootCommitment{ + SupplyRoot: supplyRoot, + Txn: &actualTx, + TxOutIdx: 0, + InternalKey: keychain.KeyDescriptor{PubKey: h.groupPubKey}, + OutputKey: h.groupPubKey, + } + + // Record the time before the call to verify timestamp is recent. + beforeCall := time.Now().Unix() + + // Test the actual LogSupplyCommitPush method. + err = syncerStore.LogSupplyCommitPush( + h.ctx, serverAddr, h.assetSpec, commitment, leaves, + ) + require.NoError(t, err, "LogSupplyCommitPush should work") + + afterCall := time.Now().Unix() + + // Verify the log entry was created correctly using the new fetch query. 
+ var logEntries []sqlc.SupplySyncerPushLog + readTx := ReadTxOption() + err = h.batchedTreeDB.ExecTx(h.ctx, readTx, + func(dbTx BaseUniverseStore) error { + var txErr error + logEntries, txErr = dbTx.FetchSupplySyncerPushLogs( + h.ctx, h.groupKeyBytes, + ) + return txErr + }, + ) + require.NoError(t, err) + require.Len(t, logEntries, 1, "Expected exactly one push log entry") + + logEntry := logEntries[0] + + // Verify all the fields are correct. + require.Equal(t, h.groupKeyBytes, logEntry.GroupKey) + require.Equal(t, int32(100), logEntry.MaxPushedBlockHeight) + require.Equal(t, "localhost:8080", logEntry.ServerAddress) + require.Equal(t, txid, logEntry.CommitTxid) + require.Equal(t, int32(0), logEntry.OutputIndex) + require.Equal(t, int32(1), logEntry.NumLeavesPushed) + require.GreaterOrEqual(t, logEntry.CreatedAt, beforeCall) + require.LessOrEqual(t, logEntry.CreatedAt, afterCall) + + t.Logf("Successfully logged push: commitTxid=%x, outputIndex=%d, "+ + "timestamp=%d, leaves=%d", logEntry.CommitTxid, + logEntry.OutputIndex, logEntry.CreatedAt, + logEntry.NumLeavesPushed) +} + // assertEqualEvents compares two supply update events by serializing them and // comparing the resulting bytes. func assertEqualEvents(t *testing.T, expected, diff --git a/tapdb/supply_syncer.go b/tapdb/supply_syncer.go new file mode 100644 index 000000000..ee4c3ecef --- /dev/null +++ b/tapdb/supply_syncer.go @@ -0,0 +1,105 @@ +package tapdb + +import ( + "context" + "fmt" + "time" + + "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/tapdb/sqlc" + "github.com/lightninglabs/taproot-assets/universe" + "github.com/lightninglabs/taproot-assets/universe/supplycommit" +) + +// SupplySyncerStore implements the persistent storage for supply syncing +// operations. It provides methods to store supply updates without requiring +// a supply commitment transition. 
+type SupplySyncerStore struct { + db BatchedUniverseTree +} + +// NewSupplySyncerStore creates a new supply syncer DB store handle. +func NewSupplySyncerStore(db BatchedUniverseTree) *SupplySyncerStore { + return &SupplySyncerStore{ + db: db, + } +} + +// LogSupplyCommitPush logs that a supply commitment and its leaves +// have been successfully pushed to a remote universe server. +func (s *SupplySyncerStore) LogSupplyCommitPush(ctx context.Context, + serverAddr universe.ServerAddr, assetSpec asset.Specifier, + commitment supplycommit.RootCommitment, + leaves supplycommit.SupplyLeaves) error { + + // Calculate the total number of leaves in this push. + numLeaves := int32(len(leaves.IssuanceLeafEntries) + + len(leaves.BurnLeafEntries) + + len(leaves.IgnoreLeafEntries)) + + // If no leaves were provided, return early without error. + if numLeaves == 0 { + return nil + } + + // Find the highest block height from all the supply leaves. + var maxBlockHeight uint32 + for _, leafEntry := range leaves.IssuanceLeafEntries { + if height := leafEntry.BlockHeight(); height > maxBlockHeight { + maxBlockHeight = height + } + } + for _, leafEntry := range leaves.BurnLeafEntries { + if height := leafEntry.BlockHeight(); height > maxBlockHeight { + maxBlockHeight = height + } + } + for _, leafEntry := range leaves.IgnoreLeafEntries { + if height := leafEntry.BlockHeight(); height > maxBlockHeight { + maxBlockHeight = height + } + } + + // All leaves must have a valid block height. + if maxBlockHeight == 0 { + return fmt.Errorf("all supply leaves must have a valid " + + "block height greater than 0") + } + + // Extract the group key for the log entry. + groupKey, err := assetSpec.UnwrapGroupKeyOrErr() + if err != nil { + return fmt.Errorf("group key must be specified for supply "+ + "syncer log: %w", err) + } + + groupKeyBytes := groupKey.SerializeCompressed() + + // Extract the outpoint (transaction ID and output index) from the + // commitment. 
+ commitTxid := commitment.Txn.TxHash() + outputIndex := commitment.TxOutIdx + + var writeTx BaseUniverseStoreOptions + return s.db.ExecTx(ctx, &writeTx, func(dbTx BaseUniverseStore) error { + // Insert the push log entry. The SQL query will find the + // chain_txn_id by looking up the supply commitment using the + // commitment transaction hash and output index (outpoint). + params := sqlc.InsertSupplySyncerPushLogParams{ + GroupKey: groupKeyBytes, + MaxPushedBlockHeight: int32(maxBlockHeight), + ServerAddress: serverAddr.HostStr(), + CommitTxid: commitTxid[:], + OutputIndex: int32(outputIndex), + NumLeavesPushed: numLeaves, + CreatedAt: time.Now().Unix(), + } + err := dbTx.InsertSupplySyncerPushLog(ctx, params) + if err != nil { + return fmt.Errorf("failed to log supply commit push: "+ + "%w", err) + } + + return nil + }) +} diff --git a/tapdb/supply_tree.go b/tapdb/supply_tree.go index 549b51813..3134aa976 100644 --- a/tapdb/supply_tree.go +++ b/tapdb/supply_tree.go @@ -10,6 +10,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/mssmt" "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapdb/sqlc" @@ -196,10 +197,108 @@ func fetchSubTreeInternal(ctx context.Context, db BaseUniverseStore, return memTree, nil } -// FetchSubTrees returns copies of all sub-trees (mint, burn, ignore) for the +// filterSubTree applies filtering to the leaves of a subtree. +func filterSubTree(ctx context.Context, + treeType supplycommit.SupplySubTree, subTree mssmt.Tree, + blockHeightEnd fn.Option[uint32]) (mssmt.Tree, error) { + + if blockHeightEnd.IsNone() { + // No filtering needed, return the original tree. + return subTree, nil + } + + // Create a new in-memory tree to copy into. 
+ filteredSubTree := mssmt.NewCompactedTree(mssmt.NewDefaultStore()) + + // Create a predicate function to filter leaves based on block height. + filterPredicate := func(key [32]byte, leaf mssmt.LeafNode) (bool, + error) { + + blockHeightEndVal, err := blockHeightEnd.UnwrapOrErr( + fmt.Errorf("block height end not set"), + ) + if err != nil { + return false, err + } + + // Decode the leaf based on the tree type to extract block + // height. + switch treeType { + case supplycommit.MintTreeType: + // For mint trees, decode mint event to get block + // height. + var mintEvent supplycommit.NewMintEvent + err := mintEvent.Decode(bytes.NewReader(leaf.Value)) + if err != nil { + return false, fmt.Errorf("unable to decode "+ + "mint event: %w", err) + } + + // Extract block height directly from the mint event. + mintBlockHeight := mintEvent.MintHeight + + // Include the leaf if it's within range. + return mintBlockHeight <= blockHeightEndVal, nil + + case supplycommit.BurnTreeType: + // For burn trees, decode burn leaf to get block height. + var burnLeaf universe.BurnLeaf + err := burnLeaf.Decode(bytes.NewReader(leaf.Value)) + if err != nil { + return false, fmt.Errorf("unable to decode "+ + "burn leaf: %w", err) + } + + // Extract block height directly from the burn proof. + proofBlockHeight := burnLeaf.BurnProof.BlockHeight + + // Include the leaf if it's within range. + return proofBlockHeight <= blockHeightEndVal, nil + + case supplycommit.IgnoreTreeType: + // For ignore trees, decode signed ignore tuple to get + // block height. + var signedIgnoreTuple universe.SignedIgnoreTuple + err := signedIgnoreTuple.Decode( + bytes.NewReader(leaf.Value), + ) + if err != nil { + return false, fmt.Errorf("unable to decode "+ + "signed ignore tuple: %w", err) + } + + // Extract block height directly from the "ignore" + // tuple. + tupleBlockHeight := + signedIgnoreTuple.IgnoreTuple.Val.BlockHeight + + // Include the leaf if it's within range. 
+ return tupleBlockHeight <= blockHeightEndVal, nil + + default: + return false, fmt.Errorf("unknown tree type: %v", + treeType) + } + } + + // Copy the persistent tree to the in-memory tree with filtering. + err := subTree.CopyFilter(ctx, filteredSubTree, filterPredicate) + if err != nil { + return nil, fmt.Errorf("unable to copy "+ + "sub-tree: %w", err) + } + + return filteredSubTree, nil +} + +// FetchSubTrees returns copies of all subtrees (mint, burn, ignore) for the // given asset spec. +// +// If blockHeightEnd is specified, only leaves with a block height less than +// or equal to the given height are included in the returned subtrees. func (s *SupplyTreeStore) FetchSubTrees(ctx context.Context, - spec asset.Specifier) lfn.Result[supplycommit.SupplyTrees] { + spec asset.Specifier, + blockHeightEnd fn.Option[uint32]) lfn.Result[supplycommit.SupplyTrees] { groupKey, err := spec.UnwrapGroupKeyOrErr() if err != nil { @@ -222,7 +321,15 @@ func (s *SupplyTreeStore) FetchSubTrees(ctx context.Context, "sub-tree %v: %w", treeType, fetchErr) } - trees[treeType] = subTree + filteredSubTree, err := filterSubTree( + ctx, treeType, subTree, blockHeightEnd, + ) + if err != nil { + return fmt.Errorf("failed to filter "+ + "sub-tree %v: %w", treeType, err) + } + + trees[treeType] = filteredSubTree } return nil }) @@ -233,6 +340,30 @@ func (s *SupplyTreeStore) FetchSubTrees(ctx context.Context, return lfn.Ok(trees) } +// fetchRootSupplyTreeInternal fetches and copies the root supply tree within +// an existing database transaction. Returned tree is a copy in memory. +func fetchRootSupplyTreeInternal(ctx context.Context, db BaseUniverseStore, + groupKey *btcec.PublicKey) (mssmt.Tree, error) { + + rootNs := rootSupplyNamespace(groupKey) + + // Create a wrapper for the persistent tree store. + persistentStore := newTreeStoreWrapperTx(db, rootNs) + persistentTree := mssmt.NewCompactedTree(persistentStore) + + // Create a new in-memory tree to copy into. 
+ memTree := mssmt.NewCompactedTree(mssmt.NewDefaultStore()) + + // Copy the persistent tree to the in-memory tree. + err := persistentTree.Copy(ctx, memTree) + if err != nil { + return nil, fmt.Errorf("unable to copy root supply "+ + "tree %s: %w", rootNs, err) + } + + return memTree, nil +} + // FetchRootSupplyTree returns a copy of the root supply tree for the given // asset spec. func (s *SupplyTreeStore) FetchRootSupplyTree(ctx context.Context, @@ -245,24 +376,13 @@ func (s *SupplyTreeStore) FetchRootSupplyTree(ctx context.Context, ) } - rootNs := rootSupplyNamespace(groupKey) - var treeCopy mssmt.Tree readTx := NewBaseUniverseReadTx() err = s.db.ExecTx(ctx, &readTx, func(db BaseUniverseStore) error { - // Create a wrapper for the persistent tree store. - persistentStore := newTreeStoreWrapperTx(db, rootNs) - persistentTree := mssmt.NewCompactedTree(persistentStore) - - // Create a new in-memory tree to copy into. - memTree := mssmt.NewCompactedTree(mssmt.NewDefaultStore()) - - // Copy the persistent tree to the in-memory tree. - err := persistentTree.Copy(ctx, memTree) + memTree, err := fetchRootSupplyTreeInternal(ctx, db, groupKey) if err != nil { - return fmt.Errorf("unable to copy root supply "+ - "tree %s: %w", rootNs, err) + return err } treeCopy = memTree @@ -275,6 +395,55 @@ func (s *SupplyTreeStore) FetchRootSupplyTree(ctx context.Context, return lfn.Ok(treeCopy) } +// FetchSupplyTrees returns a copy of the root supply tree and subtrees for the +// given asset spec. 
+func (s *SupplyTreeStore) FetchSupplyTrees(ctx context.Context, + spec asset.Specifier) (mssmt.Tree, *supplycommit.SupplyTrees, error) { + + groupKey, err := spec.UnwrapGroupKeyOrErr() + if err != nil { + return nil, nil, fmt.Errorf( + "group key must be specified for supply tree: %w", err, + ) + } + + var ( + rootTree mssmt.Tree + subTrees = make(supplycommit.SupplyTrees) + ) + + readTx := NewBaseUniverseReadTx() + err = s.db.ExecTx(ctx, &readTx, func(db BaseUniverseStore) error { + // Fetch the root supply tree. + memTree, err := fetchRootSupplyTreeInternal(ctx, db, groupKey) + if err != nil { + return err + } + + rootTree = memTree + + // Fetch all the subtrees. + for _, treeType := range allSupplyTreeTypes { + subTree, fetchErr := fetchSubTreeInternal( + ctx, db, groupKey, treeType, + ) + if fetchErr != nil { + return fmt.Errorf("failed to fetch subtree "+ + "%v: %w", treeType, fetchErr) + } + + subTrees[treeType] = subTree + } + + return nil + }) + if err != nil { + return nil, nil, fmt.Errorf("failed to execute db tx: %w", err) + } + + return rootTree, &subTrees, nil +} + // registerMintSupplyInternal inserts a new minting leaf into the mint supply // sub-tree within an existing database transaction. It returns the universe // proof containing the new sub-tree root. diff --git a/tapdb/universe.go b/tapdb/universe.go index 15a1822de..54678177a 100644 --- a/tapdb/universe.go +++ b/tapdb/universe.go @@ -138,6 +138,16 @@ type BaseUniverseStore interface { QuerySupplyLeavesByHeight(ctx context.Context, arg QuerySupplyLeavesByHeightParams) ( []sqlc.QuerySupplyLeavesByHeightRow, error) + + // InsertSupplySyncerPushLog inserts a supply syncer push log entry to + // track a successful push to a remote universe server. + InsertSupplySyncerPushLog(ctx context.Context, + arg sqlc.InsertSupplySyncerPushLogParams) error + + // FetchSupplySyncerPushLogs fetches all push log entries for + // a given asset group, ordered by creation time (newest first). 
+ FetchSupplySyncerPushLogs(ctx context.Context, + groupKey []byte) ([]sqlc.SupplySyncerPushLog, error) } // getUniverseTreeSum retrieves the sum of a universe tree specified by its diff --git a/tapgarden/caretaker.go b/tapgarden/caretaker.go index 8e16947d3..aaea11c3c 100644 --- a/tapgarden/caretaker.go +++ b/tapgarden/caretaker.go @@ -1597,7 +1597,7 @@ type emptyCacheVal = singleCacheValue[emptyVal] // GenGroupVerifier generates a group key verification callback function given a // DB handle. func GenGroupVerifier(ctx context.Context, - mintingStore MintingStore) func(*btcec.PublicKey) error { + mintingStore GroupFetcher) func(*btcec.PublicKey) error { // Cache known group keys that were previously fetched. assetGroups := lru.NewCache[asset.SerializedKey, emptyCacheVal]( @@ -1633,7 +1633,7 @@ func GenGroupVerifier(ctx context.Context, // GenGroupAnchorVerifier generates a caching group anchor verification // callback function given a DB handle. func GenGroupAnchorVerifier(ctx context.Context, - mintingStore MintingStore) func(*asset.Genesis, *asset.GroupKey) error { + mintingStore GroupFetcher) func(*asset.Genesis, *asset.GroupKey) error { // Cache anchors for groups that were previously fetched. groupAnchors := lru.NewCache[ diff --git a/tapgarden/interface.go b/tapgarden/interface.go index bba7a60ba..786f46700 100644 --- a/tapgarden/interface.go +++ b/tapgarden/interface.go @@ -303,6 +303,14 @@ type MintingStore interface { groupKey btcec.PublicKey) (fn.Option[DelegationKey], error) } +// GroupFetcher is an interface that allows fetching of asset groups. +type GroupFetcher interface { + // FetchGroupByGroupKey fetches the asset group with a matching tweaked + // key, including the genesis information used to create the group. + FetchGroupByGroupKey(ctx context.Context, + groupKey *btcec.PublicKey) (*asset.AssetGroup, error) +} + // ChainBridge is our bridge to the target chain. 
It's used to get confirmation // notifications, the current height, publish transactions, and also estimate // fees. diff --git a/taprpc/perms.go b/taprpc/perms.go index 2e87a3945..6bffd98bb 100644 --- a/taprpc/perms.go +++ b/taprpc/perms.go @@ -263,6 +263,10 @@ var ( Entity: "universe", Action: "write", }}, + "/universerpc.Universe/InsertSupplyCommit": {{ + Entity: "universe", + Action: "write", + }}, "/universerpc.Universe/FetchSupplyCommit": {{ Entity: "universe", Action: "read", @@ -370,13 +374,15 @@ func MacaroonWhitelist(allowUniPublicAccessRead bool, // nolint: lll if allowUniPublicAccessRead || allowPublicUniProofCourier { whitelist["/universerpc.Universe/QueryProof"] = struct{}{} - whitelist["/universerpc.Universe/FetchSupplyLeaves"] = struct{}{} + whitelist["/universerpc.Universe/FetchSupplyCommit"] = struct{}{} whitelist["/authmailboxrpc.Mailbox/ReceiveMessages"] = struct{}{} } // Conditionally whitelist universe server write methods. + // nolint: lll if allowUniPublicAccessWrite || allowPublicUniProofCourier { whitelist["/universerpc.Universe/InsertProof"] = struct{}{} + whitelist["/universerpc.Universe/InsertSupplyCommit"] = struct{}{} whitelist["/authmailboxrpc.Mailbox/SendMessage"] = struct{}{} } diff --git a/taprpc/universerpc/universe.pb.go b/taprpc/universerpc/universe.pb.go index 558cb2fb4..8db15e6f4 100644 --- a/taprpc/universerpc/universe.pb.go +++ b/taprpc/universerpc/universe.pb.go @@ -3670,15 +3670,14 @@ type FetchSupplyCommitRequest struct { // *FetchSupplyCommitRequest_GroupKeyBytes // *FetchSupplyCommitRequest_GroupKeyStr GroupKey isFetchSupplyCommitRequest_GroupKey `protobuf_oneof:"group_key"` - // Optional: A list of issuance leaf keys. For each key in this list, - // the endpoint will generate and return an inclusion proof. - IssuanceLeafKeys [][]byte `protobuf:"bytes,3,rep,name=issuance_leaf_keys,json=issuanceLeafKeys,proto3" json:"issuance_leaf_keys,omitempty"` - // Optional: A list of burn leaf keys. 
For each key in this list, - // the endpoint will generate and return an inclusion proof. - BurnLeafKeys [][]byte `protobuf:"bytes,4,rep,name=burn_leaf_keys,json=burnLeafKeys,proto3" json:"burn_leaf_keys,omitempty"` - // Optional: A list of ignore leaf keys. For each key in this list, the - // endpoint will generate and return an inclusion proof. - IgnoreLeafKeys [][]byte `protobuf:"bytes,5,rep,name=ignore_leaf_keys,json=ignoreLeafKeys,proto3" json:"ignore_leaf_keys,omitempty"` + // Specifies which supply commit to fetch. + // + // Types that are assignable to Locator: + // + // *FetchSupplyCommitRequest_CommitOutpoint + // *FetchSupplyCommitRequest_SpentCommitOutpoint + // *FetchSupplyCommitRequest_VeryFirst + Locator isFetchSupplyCommitRequest_Locator `protobuf_oneof:"locator"` } func (x *FetchSupplyCommitRequest) Reset() { @@ -3734,27 +3733,34 @@ func (x *FetchSupplyCommitRequest) GetGroupKeyStr() string { return "" } -func (x *FetchSupplyCommitRequest) GetIssuanceLeafKeys() [][]byte { - if x != nil { - return x.IssuanceLeafKeys +func (m *FetchSupplyCommitRequest) GetLocator() isFetchSupplyCommitRequest_Locator { + if m != nil { + return m.Locator } return nil } -func (x *FetchSupplyCommitRequest) GetBurnLeafKeys() [][]byte { - if x != nil { - return x.BurnLeafKeys +func (x *FetchSupplyCommitRequest) GetCommitOutpoint() *taprpc.OutPoint { + if x, ok := x.GetLocator().(*FetchSupplyCommitRequest_CommitOutpoint); ok { + return x.CommitOutpoint } return nil } -func (x *FetchSupplyCommitRequest) GetIgnoreLeafKeys() [][]byte { - if x != nil { - return x.IgnoreLeafKeys +func (x *FetchSupplyCommitRequest) GetSpentCommitOutpoint() *taprpc.OutPoint { + if x, ok := x.GetLocator().(*FetchSupplyCommitRequest_SpentCommitOutpoint); ok { + return x.SpentCommitOutpoint } return nil } +func (x *FetchSupplyCommitRequest) GetVeryFirst() bool { + if x, ok := x.GetLocator().(*FetchSupplyCommitRequest_VeryFirst); ok { + return x.VeryFirst + } + return false +} + type 
isFetchSupplyCommitRequest_GroupKey interface { isFetchSupplyCommitRequest_GroupKey() } @@ -3774,6 +3780,39 @@ func (*FetchSupplyCommitRequest_GroupKeyBytes) isFetchSupplyCommitRequest_GroupK func (*FetchSupplyCommitRequest_GroupKeyStr) isFetchSupplyCommitRequest_GroupKey() {} +type isFetchSupplyCommitRequest_Locator interface { + isFetchSupplyCommitRequest_Locator() +} + +type FetchSupplyCommitRequest_CommitOutpoint struct { + // Fetch the supply commitment that created this new commitment + // output on chain. + CommitOutpoint *taprpc.OutPoint `protobuf:"bytes,3,opt,name=commit_outpoint,json=commitOutpoint,proto3,oneof"` +} + +type FetchSupplyCommitRequest_SpentCommitOutpoint struct { + // Fetch the supply commitment that spent the specified commitment + // output on chain to create a new supply commitment. This can be used + // to traverse the chain of supply commitments by watching the spend of + // the commitment output. + SpentCommitOutpoint *taprpc.OutPoint `protobuf:"bytes,4,opt,name=spent_commit_outpoint,json=spentCommitOutpoint,proto3,oneof"` +} + +type FetchSupplyCommitRequest_VeryFirst struct { + // Fetch the very first supply commitment for the asset group. This + // returns the initial supply commitment that spent the pre-commitment + // output of the very first asset mint of a grouped asset (also known + // as the group anchor). This is useful as the starting point to fetch + // all supply commitments for a grouped asset one by one. 
+ VeryFirst bool `protobuf:"varint,5,opt,name=very_first,json=veryFirst,proto3,oneof"` +} + +func (*FetchSupplyCommitRequest_CommitOutpoint) isFetchSupplyCommitRequest_Locator() {} + +func (*FetchSupplyCommitRequest_SpentCommitOutpoint) isFetchSupplyCommitRequest_Locator() {} + +func (*FetchSupplyCommitRequest_VeryFirst) isFetchSupplyCommitRequest_Locator() {} + type SupplyCommitSubtreeRoot struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3855,44 +3894,37 @@ type FetchSupplyCommitResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The supply commitment merkle sum root node for the specified asset. - SupplyCommitmentRoot *MerkleSumNode `protobuf:"bytes,1,opt,name=supply_commitment_root,json=supplyCommitmentRoot,proto3" json:"supply_commitment_root,omitempty"` - // The txid of the anchor transaction that commits to the supply - // commitment for the specified asset. - AnchorTxid string `protobuf:"bytes,2,opt,name=anchor_txid,json=anchorTxid,proto3" json:"anchor_txid,omitempty"` - // The output index of the anchor transaction that commits to the supply - // commitment for the specified asset. - AnchorTxOutIdx uint32 `protobuf:"varint,3,opt,name=anchor_tx_out_idx,json=anchorTxOutIdx,proto3" json:"anchor_tx_out_idx,omitempty"` - // The transaction output taproot internal key of the anchor transaction - // that commits to the supply commitment for the specified asset. - AnchorTxOutInternalKey []byte `protobuf:"bytes,4,opt,name=anchor_tx_out_internal_key,json=anchorTxOutInternalKey,proto3" json:"anchor_tx_out_internal_key,omitempty"` - // The height of the block at which the supply commitment was anchored. - BlockHeight uint32 `protobuf:"varint,5,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` - // The hash of the block at which the supply commitment was anchored. 
- BlockHash []byte `protobuf:"bytes,6,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` - // The index of the transaction in the block that commits to the supply - // commitment. - BlockTxIndex uint32 `protobuf:"varint,7,opt,name=block_tx_index,json=blockTxIndex,proto3" json:"block_tx_index,omitempty"` + // The supply commitment chain data that contains both the commitment and + // chain proof information. + ChainData *SupplyCommitChainData `protobuf:"bytes,1,opt,name=chain_data,json=chainData,proto3" json:"chain_data,omitempty"` // The total number of satoshis in on-chain fees paid by the supply // commitment transaction. - TxChainFeesSats int64 `protobuf:"varint,8,opt,name=tx_chain_fees_sats,json=txChainFeesSats,proto3" json:"tx_chain_fees_sats,omitempty"` + TxChainFeesSats int64 `protobuf:"varint,2,opt,name=tx_chain_fees_sats,json=txChainFeesSats,proto3" json:"tx_chain_fees_sats,omitempty"` // The root of the issuance tree for the specified asset. - IssuanceSubtreeRoot *SupplyCommitSubtreeRoot `protobuf:"bytes,9,opt,name=issuance_subtree_root,json=issuanceSubtreeRoot,proto3" json:"issuance_subtree_root,omitempty"` + IssuanceSubtreeRoot *SupplyCommitSubtreeRoot `protobuf:"bytes,3,opt,name=issuance_subtree_root,json=issuanceSubtreeRoot,proto3" json:"issuance_subtree_root,omitempty"` // The root of the burn tree for the specified asset. - BurnSubtreeRoot *SupplyCommitSubtreeRoot `protobuf:"bytes,10,opt,name=burn_subtree_root,json=burnSubtreeRoot,proto3" json:"burn_subtree_root,omitempty"` + BurnSubtreeRoot *SupplyCommitSubtreeRoot `protobuf:"bytes,4,opt,name=burn_subtree_root,json=burnSubtreeRoot,proto3" json:"burn_subtree_root,omitempty"` // The root of the ignore tree for the specified asset. - IgnoreSubtreeRoot *SupplyCommitSubtreeRoot `protobuf:"bytes,11,opt,name=ignore_subtree_root,json=ignoreSubtreeRoot,proto3" json:"ignore_subtree_root,omitempty"` - // Inclusion proofs for each issuance leaf key provided in the request. 
- // Each entry corresponds to the key at the same index in - // `issuance_leaf_keys`. - IssuanceLeafInclusionProofs [][]byte `protobuf:"bytes,12,rep,name=issuance_leaf_inclusion_proofs,json=issuanceLeafInclusionProofs,proto3" json:"issuance_leaf_inclusion_proofs,omitempty"` - // Inclusion proofs for each burn leaf key provided in the request. - // Each entry corresponds to the key at the same index in `burn_leaf_keys`. - BurnLeafInclusionProofs [][]byte `protobuf:"bytes,13,rep,name=burn_leaf_inclusion_proofs,json=burnLeafInclusionProofs,proto3" json:"burn_leaf_inclusion_proofs,omitempty"` - // Inclusion proofs for each ignored leaf key provided in the request. - // Each entry corresponds to the key at the same index in - // `ignore_leaf_keys`. - IgnoreLeafInclusionProofs [][]byte `protobuf:"bytes,14,rep,name=ignore_leaf_inclusion_proofs,json=ignoreLeafInclusionProofs,proto3" json:"ignore_leaf_inclusion_proofs,omitempty"` + IgnoreSubtreeRoot *SupplyCommitSubtreeRoot `protobuf:"bytes,5,opt,name=ignore_subtree_root,json=ignoreSubtreeRoot,proto3" json:"ignore_subtree_root,omitempty"` + // The issuance leaves that were added by this supply commitment. Does not + // include leaves that were already present in the issuance subtree before + // the block height at which this supply commitment was anchored. + IssuanceLeaves []*SupplyLeafEntry `protobuf:"bytes,6,rep,name=issuance_leaves,json=issuanceLeaves,proto3" json:"issuance_leaves,omitempty"` + // The burn leaves that were added by this supply commitment. Does not + // include leaves that were already present in the burn subtree before + // the block height at which this supply commitment was anchored. + BurnLeaves []*SupplyLeafEntry `protobuf:"bytes,7,rep,name=burn_leaves,json=burnLeaves,proto3" json:"burn_leaves,omitempty"` + // The ignore leaves that were added by this supply commitment. 
Does not + // include leaves that were already present in the ignore subtree before + // the block height at which this supply commitment was anchored. + IgnoreLeaves []*SupplyLeafEntry `protobuf:"bytes,8,rep,name=ignore_leaves,json=ignoreLeaves,proto3" json:"ignore_leaves,omitempty"` + // The total outstanding supply of the asset after applying all the supply + // changes (issuance, burn, ignore) included in this supply commitment. + TotalOutstandingSupply uint64 `protobuf:"varint,9,opt,name=total_outstanding_supply,json=totalOutstandingSupply,proto3" json:"total_outstanding_supply,omitempty"` + // The outpoint of the previous commitment that this new commitment is + // spending. This must be set unless this is the very first supply + // commitment of a grouped asset. + SpentCommitmentOutpoint *taprpc.OutPoint `protobuf:"bytes,10,opt,name=spent_commitment_outpoint,json=spentCommitmentOutpoint,proto3" json:"spent_commitment_outpoint,omitempty"` } func (x *FetchSupplyCommitResponse) Reset() { @@ -3927,100 +3959,72 @@ func (*FetchSupplyCommitResponse) Descriptor() ([]byte, []int) { return file_universerpc_universe_proto_rawDescGZIP(), []int{57} } -func (x *FetchSupplyCommitResponse) GetSupplyCommitmentRoot() *MerkleSumNode { +func (x *FetchSupplyCommitResponse) GetChainData() *SupplyCommitChainData { if x != nil { - return x.SupplyCommitmentRoot + return x.ChainData } return nil } -func (x *FetchSupplyCommitResponse) GetAnchorTxid() string { - if x != nil { - return x.AnchorTxid - } - return "" -} - -func (x *FetchSupplyCommitResponse) GetAnchorTxOutIdx() uint32 { +func (x *FetchSupplyCommitResponse) GetTxChainFeesSats() int64 { if x != nil { - return x.AnchorTxOutIdx + return x.TxChainFeesSats } return 0 } -func (x *FetchSupplyCommitResponse) GetAnchorTxOutInternalKey() []byte { +func (x *FetchSupplyCommitResponse) GetIssuanceSubtreeRoot() *SupplyCommitSubtreeRoot { if x != nil { - return x.AnchorTxOutInternalKey + return x.IssuanceSubtreeRoot } return nil } -func 
(x *FetchSupplyCommitResponse) GetBlockHeight() uint32 { - if x != nil { - return x.BlockHeight - } - return 0 -} - -func (x *FetchSupplyCommitResponse) GetBlockHash() []byte { +func (x *FetchSupplyCommitResponse) GetBurnSubtreeRoot() *SupplyCommitSubtreeRoot { if x != nil { - return x.BlockHash + return x.BurnSubtreeRoot } return nil } -func (x *FetchSupplyCommitResponse) GetBlockTxIndex() uint32 { - if x != nil { - return x.BlockTxIndex - } - return 0 -} - -func (x *FetchSupplyCommitResponse) GetTxChainFeesSats() int64 { - if x != nil { - return x.TxChainFeesSats - } - return 0 -} - -func (x *FetchSupplyCommitResponse) GetIssuanceSubtreeRoot() *SupplyCommitSubtreeRoot { +func (x *FetchSupplyCommitResponse) GetIgnoreSubtreeRoot() *SupplyCommitSubtreeRoot { if x != nil { - return x.IssuanceSubtreeRoot + return x.IgnoreSubtreeRoot } return nil } -func (x *FetchSupplyCommitResponse) GetBurnSubtreeRoot() *SupplyCommitSubtreeRoot { +func (x *FetchSupplyCommitResponse) GetIssuanceLeaves() []*SupplyLeafEntry { if x != nil { - return x.BurnSubtreeRoot + return x.IssuanceLeaves } return nil } -func (x *FetchSupplyCommitResponse) GetIgnoreSubtreeRoot() *SupplyCommitSubtreeRoot { +func (x *FetchSupplyCommitResponse) GetBurnLeaves() []*SupplyLeafEntry { if x != nil { - return x.IgnoreSubtreeRoot + return x.BurnLeaves } return nil } -func (x *FetchSupplyCommitResponse) GetIssuanceLeafInclusionProofs() [][]byte { +func (x *FetchSupplyCommitResponse) GetIgnoreLeaves() []*SupplyLeafEntry { if x != nil { - return x.IssuanceLeafInclusionProofs + return x.IgnoreLeaves } return nil } -func (x *FetchSupplyCommitResponse) GetBurnLeafInclusionProofs() [][]byte { +func (x *FetchSupplyCommitResponse) GetTotalOutstandingSupply() uint64 { if x != nil { - return x.BurnLeafInclusionProofs + return x.TotalOutstandingSupply } - return nil + return 0 } -func (x *FetchSupplyCommitResponse) GetIgnoreLeafInclusionProofs() [][]byte { +func (x *FetchSupplyCommitResponse) GetSpentCommitmentOutpoint() 
*taprpc.OutPoint { if x != nil { - return x.IgnoreLeafInclusionProofs + return x.SpentCommitmentOutpoint } return nil } @@ -4042,6 +4046,15 @@ type FetchSupplyLeavesRequest struct { BlockHeightStart uint32 `protobuf:"varint,3,opt,name=block_height_start,json=blockHeightStart,proto3" json:"block_height_start,omitempty"` // The end block height for the range of supply leaves to fetch. BlockHeightEnd uint32 `protobuf:"varint,4,opt,name=block_height_end,json=blockHeightEnd,proto3" json:"block_height_end,omitempty"` + // Optional: A list of issuance leaf keys. For each key in this list, + // the endpoint will generate and return an inclusion proof. + IssuanceLeafKeys [][]byte `protobuf:"bytes,5,rep,name=issuance_leaf_keys,json=issuanceLeafKeys,proto3" json:"issuance_leaf_keys,omitempty"` + // Optional: A list of burn leaf keys. For each key in this list, + // the endpoint will generate and return an inclusion proof. + BurnLeafKeys [][]byte `protobuf:"bytes,6,rep,name=burn_leaf_keys,json=burnLeafKeys,proto3" json:"burn_leaf_keys,omitempty"` + // Optional: A list of ignore leaf keys. For each key in this list, the + // endpoint will generate and return an inclusion proof. 
+ IgnoreLeafKeys [][]byte `protobuf:"bytes,7,rep,name=ignore_leaf_keys,json=ignoreLeafKeys,proto3" json:"ignore_leaf_keys,omitempty"` } func (x *FetchSupplyLeavesRequest) Reset() { @@ -4111,6 +4124,27 @@ func (x *FetchSupplyLeavesRequest) GetBlockHeightEnd() uint32 { return 0 } +func (x *FetchSupplyLeavesRequest) GetIssuanceLeafKeys() [][]byte { + if x != nil { + return x.IssuanceLeafKeys + } + return nil +} + +func (x *FetchSupplyLeavesRequest) GetBurnLeafKeys() [][]byte { + if x != nil { + return x.BurnLeafKeys + } + return nil +} + +func (x *FetchSupplyLeavesRequest) GetIgnoreLeafKeys() [][]byte { + if x != nil { + return x.IgnoreLeafKeys + } + return nil +} + type isFetchSupplyLeavesRequest_GroupKey interface { isFetchSupplyLeavesRequest_GroupKey() } @@ -4284,6 +4318,17 @@ type FetchSupplyLeavesResponse struct { IssuanceLeaves []*SupplyLeafEntry `protobuf:"bytes,1,rep,name=issuance_leaves,json=issuanceLeaves,proto3" json:"issuance_leaves,omitempty"` BurnLeaves []*SupplyLeafEntry `protobuf:"bytes,2,rep,name=burn_leaves,json=burnLeaves,proto3" json:"burn_leaves,omitempty"` IgnoreLeaves []*SupplyLeafEntry `protobuf:"bytes,3,rep,name=ignore_leaves,json=ignoreLeaves,proto3" json:"ignore_leaves,omitempty"` + // Inclusion proofs for each issuance leaf key provided in the request. + // Each entry corresponds to the key at the same index in + // `issuance_leaf_keys`. + IssuanceLeafInclusionProofs [][]byte `protobuf:"bytes,4,rep,name=issuance_leaf_inclusion_proofs,json=issuanceLeafInclusionProofs,proto3" json:"issuance_leaf_inclusion_proofs,omitempty"` + // Inclusion proofs for each burn leaf key provided in the request. + // Each entry corresponds to the key at the same index in `burn_leaf_keys`. + BurnLeafInclusionProofs [][]byte `protobuf:"bytes,5,rep,name=burn_leaf_inclusion_proofs,json=burnLeafInclusionProofs,proto3" json:"burn_leaf_inclusion_proofs,omitempty"` + // Inclusion proofs for each ignored leaf key provided in the request. 
+ // Each entry corresponds to the key at the same index in + // `ignore_leaf_keys`. + IgnoreLeafInclusionProofs [][]byte `protobuf:"bytes,6,rep,name=ignore_leaf_inclusion_proofs,json=ignoreLeafInclusionProofs,proto3" json:"ignore_leaf_inclusion_proofs,omitempty"` } func (x *FetchSupplyLeavesResponse) Reset() { @@ -4339,6 +4384,344 @@ func (x *FetchSupplyLeavesResponse) GetIgnoreLeaves() []*SupplyLeafEntry { return nil } +func (x *FetchSupplyLeavesResponse) GetIssuanceLeafInclusionProofs() [][]byte { + if x != nil { + return x.IssuanceLeafInclusionProofs + } + return nil +} + +func (x *FetchSupplyLeavesResponse) GetBurnLeafInclusionProofs() [][]byte { + if x != nil { + return x.BurnLeafInclusionProofs + } + return nil +} + +func (x *FetchSupplyLeavesResponse) GetIgnoreLeafInclusionProofs() [][]byte { + if x != nil { + return x.IgnoreLeafInclusionProofs + } + return nil +} + +// SupplyCommitChainData represents the on-chain artifacts for a supply +// commitment update. +type SupplyCommitChainData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The raw transaction that created the root commitment. + Txn []byte `protobuf:"bytes,1,opt,name=txn,proto3" json:"txn,omitempty"` + // The index of the output in the transaction where the commitment resides. + TxOutIdx uint32 `protobuf:"varint,2,opt,name=tx_out_idx,json=txOutIdx,proto3" json:"tx_out_idx,omitempty"` + // The internal key used to create the commitment output. + InternalKey []byte `protobuf:"bytes,3,opt,name=internal_key,json=internalKey,proto3" json:"internal_key,omitempty"` + // The taproot output key used to create the commitment output. + OutputKey []byte `protobuf:"bytes,4,opt,name=output_key,json=outputKey,proto3" json:"output_key,omitempty"` + // The root hash of the supply tree that contains the set of + // sub-commitments. The sum value of this tree is the outstanding supply + // value. 
+ SupplyRootHash []byte `protobuf:"bytes,5,opt,name=supply_root_hash,json=supplyRootHash,proto3" json:"supply_root_hash,omitempty"` + // The sum value of the supply root tree, representing the outstanding + // supply amount. + SupplyRootSum uint64 `protobuf:"varint,6,opt,name=supply_root_sum,json=supplyRootSum,proto3" json:"supply_root_sum,omitempty"` + // The block header of the block that contains the supply commitment + // transaction. + BlockHeader []byte `protobuf:"bytes,7,opt,name=block_header,json=blockHeader,proto3" json:"block_header,omitempty"` + // The hash of the block that contains the commitment. + BlockHash []byte `protobuf:"bytes,8,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` + // The block height of the block that contains the supply commitment + // transaction. + BlockHeight uint32 `protobuf:"varint,9,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` + // The merkle proof that proves that the supply commitment transaction is + // included in the block. + TxBlockMerkleProof []byte `protobuf:"bytes,10,opt,name=tx_block_merkle_proof,json=txBlockMerkleProof,proto3" json:"tx_block_merkle_proof,omitempty"` + // The index of the supply commitment transaction in the block. 
+ TxIndex uint32 `protobuf:"varint,11,opt,name=tx_index,json=txIndex,proto3" json:"tx_index,omitempty"` +} + +func (x *SupplyCommitChainData) Reset() { + *x = SupplyCommitChainData{} + if protoimpl.UnsafeEnabled { + mi := &file_universerpc_universe_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SupplyCommitChainData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SupplyCommitChainData) ProtoMessage() {} + +func (x *SupplyCommitChainData) ProtoReflect() protoreflect.Message { + mi := &file_universerpc_universe_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SupplyCommitChainData.ProtoReflect.Descriptor instead. +func (*SupplyCommitChainData) Descriptor() ([]byte, []int) { + return file_universerpc_universe_proto_rawDescGZIP(), []int{62} +} + +func (x *SupplyCommitChainData) GetTxn() []byte { + if x != nil { + return x.Txn + } + return nil +} + +func (x *SupplyCommitChainData) GetTxOutIdx() uint32 { + if x != nil { + return x.TxOutIdx + } + return 0 +} + +func (x *SupplyCommitChainData) GetInternalKey() []byte { + if x != nil { + return x.InternalKey + } + return nil +} + +func (x *SupplyCommitChainData) GetOutputKey() []byte { + if x != nil { + return x.OutputKey + } + return nil +} + +func (x *SupplyCommitChainData) GetSupplyRootHash() []byte { + if x != nil { + return x.SupplyRootHash + } + return nil +} + +func (x *SupplyCommitChainData) GetSupplyRootSum() uint64 { + if x != nil { + return x.SupplyRootSum + } + return 0 +} + +func (x *SupplyCommitChainData) GetBlockHeader() []byte { + if x != nil { + return x.BlockHeader + } + return nil +} + +func (x *SupplyCommitChainData) GetBlockHash() []byte { + if x != nil { + return x.BlockHash + } + return nil +} + +func (x 
*SupplyCommitChainData) GetBlockHeight() uint32 { + if x != nil { + return x.BlockHeight + } + return 0 +} + +func (x *SupplyCommitChainData) GetTxBlockMerkleProof() []byte { + if x != nil { + return x.TxBlockMerkleProof + } + return nil +} + +func (x *SupplyCommitChainData) GetTxIndex() uint32 { + if x != nil { + return x.TxIndex + } + return 0 +} + +type InsertSupplyCommitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique identifier for the target asset group whose supply commitment + // is being inserted. + // + // Types that are assignable to GroupKey: + // + // *InsertSupplyCommitRequest_GroupKeyBytes + // *InsertSupplyCommitRequest_GroupKeyStr + GroupKey isInsertSupplyCommitRequest_GroupKey `protobuf_oneof:"group_key"` + // The supply commitment chain data that contains both the commitment and + // chain proof information. + ChainData *SupplyCommitChainData `protobuf:"bytes,3,opt,name=chain_data,json=chainData,proto3" json:"chain_data,omitempty"` + // The outpoint of the previous commitment that this new commitment is + // spending. This must be set unless this is the very first supply + // commitment of a grouped asset. + SpentCommitmentOutpoint *taprpc.OutPoint `protobuf:"bytes,4,opt,name=spent_commitment_outpoint,json=spentCommitmentOutpoint,proto3" json:"spent_commitment_outpoint,omitempty"` + // The supply leaves that represent the supply changes for the asset group. 
+ IssuanceLeaves []*SupplyLeafEntry `protobuf:"bytes,5,rep,name=issuance_leaves,json=issuanceLeaves,proto3" json:"issuance_leaves,omitempty"` + BurnLeaves []*SupplyLeafEntry `protobuf:"bytes,6,rep,name=burn_leaves,json=burnLeaves,proto3" json:"burn_leaves,omitempty"` + IgnoreLeaves []*SupplyLeafEntry `protobuf:"bytes,7,rep,name=ignore_leaves,json=ignoreLeaves,proto3" json:"ignore_leaves,omitempty"` +} + +func (x *InsertSupplyCommitRequest) Reset() { + *x = InsertSupplyCommitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_universerpc_universe_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InsertSupplyCommitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InsertSupplyCommitRequest) ProtoMessage() {} + +func (x *InsertSupplyCommitRequest) ProtoReflect() protoreflect.Message { + mi := &file_universerpc_universe_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InsertSupplyCommitRequest.ProtoReflect.Descriptor instead. 
+func (*InsertSupplyCommitRequest) Descriptor() ([]byte, []int) { + return file_universerpc_universe_proto_rawDescGZIP(), []int{63} +} + +func (m *InsertSupplyCommitRequest) GetGroupKey() isInsertSupplyCommitRequest_GroupKey { + if m != nil { + return m.GroupKey + } + return nil +} + +func (x *InsertSupplyCommitRequest) GetGroupKeyBytes() []byte { + if x, ok := x.GetGroupKey().(*InsertSupplyCommitRequest_GroupKeyBytes); ok { + return x.GroupKeyBytes + } + return nil +} + +func (x *InsertSupplyCommitRequest) GetGroupKeyStr() string { + if x, ok := x.GetGroupKey().(*InsertSupplyCommitRequest_GroupKeyStr); ok { + return x.GroupKeyStr + } + return "" +} + +func (x *InsertSupplyCommitRequest) GetChainData() *SupplyCommitChainData { + if x != nil { + return x.ChainData + } + return nil +} + +func (x *InsertSupplyCommitRequest) GetSpentCommitmentOutpoint() *taprpc.OutPoint { + if x != nil { + return x.SpentCommitmentOutpoint + } + return nil +} + +func (x *InsertSupplyCommitRequest) GetIssuanceLeaves() []*SupplyLeafEntry { + if x != nil { + return x.IssuanceLeaves + } + return nil +} + +func (x *InsertSupplyCommitRequest) GetBurnLeaves() []*SupplyLeafEntry { + if x != nil { + return x.BurnLeaves + } + return nil +} + +func (x *InsertSupplyCommitRequest) GetIgnoreLeaves() []*SupplyLeafEntry { + if x != nil { + return x.IgnoreLeaves + } + return nil +} + +type isInsertSupplyCommitRequest_GroupKey interface { + isInsertSupplyCommitRequest_GroupKey() +} + +type InsertSupplyCommitRequest_GroupKeyBytes struct { + // The 32-byte asset group key specified as raw bytes (gRPC only). + GroupKeyBytes []byte `protobuf:"bytes,1,opt,name=group_key_bytes,json=groupKeyBytes,proto3,oneof"` +} + +type InsertSupplyCommitRequest_GroupKeyStr struct { + // The 32-byte asset group key encoded as hex string (use this for + // REST). 
+ GroupKeyStr string `protobuf:"bytes,2,opt,name=group_key_str,json=groupKeyStr,proto3,oneof"` +} + +func (*InsertSupplyCommitRequest_GroupKeyBytes) isInsertSupplyCommitRequest_GroupKey() {} + +func (*InsertSupplyCommitRequest_GroupKeyStr) isInsertSupplyCommitRequest_GroupKey() {} + +type InsertSupplyCommitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *InsertSupplyCommitResponse) Reset() { + *x = InsertSupplyCommitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_universerpc_universe_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InsertSupplyCommitResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InsertSupplyCommitResponse) ProtoMessage() {} + +func (x *InsertSupplyCommitResponse) ProtoReflect() protoreflect.Message { + mi := &file_universerpc_universe_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InsertSupplyCommitResponse.ProtoReflect.Descriptor instead. 
+func (*InsertSupplyCommitResponse) Descriptor() ([]byte, []int) { + return file_universerpc_universe_proto_rawDescGZIP(), []int{64} +} + var File_universerpc_universe_proto protoreflect.FileDescriptor var file_universerpc_universe_proto_rawDesc = []byte{ @@ -4781,101 +5164,105 @@ var file_universerpc_universe_proto_rawDesc = []byte{ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x53, 0x74, 0x72, 0x42, 0x0b, 0x0a, 0x09, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x1c, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf5, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa8, 0x02, 0x0a, 0x18, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, - 0x53, 0x74, 0x72, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x5f, - 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, - 0x10, 0x69, 0x73, 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, - 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x75, 0x72, 0x6e, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x6b, - 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x62, 0x75, 0x72, 0x6e, 0x4c, - 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, - 0x65, 0x5f, 
0x6c, 0x65, 0x61, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, - 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0xd6, - 0x01, 0x0a, 0x17, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, - 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x37, - 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, - 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x72, - 0x6f, 0x6f, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x75, 0x70, 0x70, 0x6c, - 0x79, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x54, 0x72, 0x65, - 0x65, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x1b, 0x73, 0x75, 0x70, 0x70, - 0x6c, 0x79, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x73, - 0x75, 0x70, 0x70, 0x6c, 0x79, 0x54, 0x72, 0x65, 0x65, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xcf, 0x06, 0x0a, 0x19, 0x46, 0x65, 0x74, 0x63, - 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x16, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x5f, - 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 
0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, - 0x72, 0x70, 0x63, 0x2e, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x4e, 0x6f, 0x64, - 0x65, 0x52, 0x14, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, - 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6e, 0x63, 0x68, 0x6f, - 0x72, 0x5f, 0x74, 0x78, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x6e, - 0x63, 0x68, 0x6f, 0x72, 0x54, 0x78, 0x69, 0x64, 0x12, 0x29, 0x0a, 0x11, 0x61, 0x6e, 0x63, 0x68, - 0x6f, 0x72, 0x5f, 0x74, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x5f, 0x69, 0x64, 0x78, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x61, 0x6e, 0x63, 0x68, 0x6f, 0x72, 0x54, 0x78, 0x4f, 0x75, 0x74, - 0x49, 0x64, 0x78, 0x12, 0x3a, 0x0a, 0x1a, 0x61, 0x6e, 0x63, 0x68, 0x6f, 0x72, 0x5f, 0x74, 0x78, - 0x5f, 0x6f, 0x75, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x16, 0x61, 0x6e, 0x63, 0x68, 0x6f, 0x72, 0x54, - 0x78, 0x4f, 0x75, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x12, - 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, - 0x68, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x78, 0x5f, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x54, 0x78, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2b, 0x0a, 0x12, 0x74, 0x78, 0x5f, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x5f, 0x66, 0x65, 0x65, 0x73, 0x5f, 0x73, 0x61, 0x74, 0x73, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x78, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x65, 
0x65, 0x73, - 0x53, 0x61, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x15, 0x69, 0x73, 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, - 0x5f, 0x73, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, - 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x75, - 0x62, 0x74, 0x72, 0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x13, 0x69, 0x73, 0x73, 0x75, 0x61, - 0x6e, 0x63, 0x65, 0x53, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x50, - 0x0a, 0x11, 0x62, 0x75, 0x72, 0x6e, 0x5f, 0x73, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x72, - 0x6f, 0x6f, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x75, 0x6e, 0x69, 0x76, - 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, - 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x52, - 0x0f, 0x62, 0x75, 0x72, 0x6e, 0x53, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, - 0x12, 0x54, 0x0a, 0x13, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x74, 0x72, - 0x65, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x53, 0x74, 0x72, 0x12, 0x3b, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x6f, 0x75, + 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, + 0x61, 0x70, 0x72, 0x70, 0x63, 0x2e, 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x48, 0x01, + 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x46, 0x0a, 0x15, 0x73, 0x70, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x74, 0x61, 0x70, 0x72, 0x70, 0x63, 0x2e, 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, + 0x74, 0x48, 0x01, 0x52, 
0x13, 0x73, 0x70, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x79, + 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x09, + 0x76, 0x65, 0x72, 0x79, 0x46, 0x69, 0x72, 0x73, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x09, 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x22, 0xd6, 0x01, 0x0a, 0x17, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x53, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x37, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, + 0x70, 0x63, 0x2e, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x4e, 0x6f, 0x64, 0x65, + 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x75, + 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x79, + 0x54, 0x72, 0x65, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x1b, 0x73, + 0x75, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x18, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x54, 0x72, 0x65, 0x65, 0x49, 0x6e, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xde, 0x05, 0x0a, 0x19, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 
0x0a, 0x0a, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x75, + 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, + 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, + 0x52, 0x09, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2b, 0x0a, 0x12, 0x74, + 0x78, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x66, 0x65, 0x65, 0x73, 0x5f, 0x73, 0x61, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x78, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x46, 0x65, 0x65, 0x73, 0x53, 0x61, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x15, 0x69, 0x73, 0x73, 0x75, + 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x72, 0x6f, 0x6f, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, + 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x53, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x13, 0x69, + 0x73, 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x53, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x50, 0x0a, 0x11, 0x62, 0x75, 0x72, 0x6e, 0x5f, 0x73, 0x75, 0x62, 0x74, 0x72, + 0x65, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x52, - 0x6f, 0x6f, 0x74, 0x52, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x62, 0x74, 0x72, - 0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x43, 0x0a, 0x1e, 0x69, 0x73, 0x73, 0x75, 0x61, 0x6e, - 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x1b, - 0x69, 
0x73, 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x49, 0x6e, 0x63, 0x6c, - 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x62, - 0x75, 0x72, 0x6e, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0c, 0x52, - 0x17, 0x62, 0x75, 0x72, 0x6e, 0x4c, 0x65, 0x61, 0x66, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, - 0x72, 0x65, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x19, - 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, - 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x22, 0xcf, 0x01, 0x0a, 0x18, 0x46, 0x65, - 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, - 0x00, 0x52, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x12, 0x24, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x4b, 0x65, 0x79, 0x53, 0x74, 0x72, 0x12, 0x2c, 0x0a, 0x12, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, - 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x5f, 0x65, 0x6e, 
0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x45, 0x6e, 0x64, 0x42, 0x0b, + 0x6f, 0x6f, 0x74, 0x52, 0x0f, 0x62, 0x75, 0x72, 0x6e, 0x53, 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, + 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x54, 0x0a, 0x13, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x73, + 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, + 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x75, 0x62, 0x74, + 0x72, 0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x53, + 0x75, 0x62, 0x74, 0x72, 0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x45, 0x0a, 0x0f, 0x69, 0x73, + 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, + 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x66, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0e, 0x69, 0x73, 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x65, 0x61, 0x76, 0x65, + 0x73, 0x12, 0x3d, 0x0a, 0x0b, 0x62, 0x75, 0x72, 0x6e, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, + 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x66, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x62, 0x75, 0x72, 0x6e, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, + 0x12, 0x41, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, + 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x66, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 
0x4c, 0x65, 0x61, + 0x76, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6f, 0x75, 0x74, + 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4f, 0x75, 0x74, 0x73, + 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x12, 0x4c, 0x0a, + 0x19, 0x73, 0x70, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x74, 0x61, 0x70, 0x72, 0x70, 0x63, 0x2e, 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, + 0x6e, 0x74, 0x52, 0x17, 0x73, 0x70, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, + 0x65, 0x6e, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0xcd, 0x02, 0x0a, 0x18, + 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x76, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x73, 0x74, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x4b, 0x65, 0x79, 0x53, 0x74, 0x72, 0x12, 0x2c, 0x0a, 0x12, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, + 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0e, 0x62, 
0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x45, 0x6e, 0x64, + 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x61, + 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x69, 0x73, + 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x24, + 0x0a, 0x0e, 0x62, 0x75, 0x72, 0x6e, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x62, 0x75, 0x72, 0x6e, 0x4c, 0x65, 0x61, 0x66, + 0x4b, 0x65, 0x79, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6c, + 0x65, 0x61, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x7c, 0x0a, 0x0d, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, @@ -4896,7 +5283,7 @@ var file_universerpc_universe_proto_rawDesc = []byte{ 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x61, 0x77, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x72, 0x61, 0x77, 0x4c, 0x65, 0x61, 0x66, 0x22, 0xe4, 0x01, 0x0a, 0x19, + 0x28, 0x0c, 0x52, 0x07, 0x72, 0x61, 0x77, 0x4c, 0x65, 0x61, 0x66, 0x22, 0xa7, 0x03, 0x0a, 0x19, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x69, 0x73, 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 
@@ -4911,173 +5298,248 @@ var file_universerpc_universe_proto_rawDesc = []byte{ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4c, 0x65, 0x61, 0x76, - 0x65, 0x73, 0x2a, 0x59, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1a, 0x0a, 0x16, 0x50, 0x52, 0x4f, 0x4f, 0x46, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, - 0x52, 0x4f, 0x4f, 0x46, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x53, 0x53, 0x55, 0x41, 0x4e, - 0x43, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x4f, 0x46, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x46, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x39, 0x0a, - 0x10, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x49, 0x53, 0x53, 0x55, 0x41, 0x4e, - 0x43, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x59, 0x4e, - 0x43, 0x5f, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x01, 0x2a, 0xd1, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x73, - 0x65, 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x6f, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x0c, 0x53, - 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x16, 0x0a, - 0x12, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x41, 0x53, 0x53, 0x45, 0x54, 0x5f, 0x4e, - 0x41, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, - 0x5f, 0x41, 0x53, 0x53, 0x45, 0x54, 0x5f, 0x49, 0x44, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x53, - 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x41, 0x53, 0x53, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x4f, 0x52, 
0x54, 0x5f, 0x42, 0x59, 0x5f, 0x54, - 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x53, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, - 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x50, 0x52, - 0x4f, 0x4f, 0x46, 0x53, 0x10, 0x05, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x42, - 0x59, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x53, 0x49, 0x53, 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, - 0x10, 0x06, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x54, 0x4f, - 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4c, 0x59, 0x10, 0x07, 0x2a, 0x40, 0x0a, 0x0d, - 0x53, 0x6f, 0x72, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, - 0x12, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x41, 0x53, 0x43, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x44, 0x49, - 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x53, 0x43, 0x10, 0x01, 0x2a, 0x5f, - 0x0a, 0x0f, 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x12, 0x15, 0x0a, 0x11, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x5f, 0x41, 0x53, 0x53, 0x45, - 0x54, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x49, 0x4c, 0x54, - 0x45, 0x52, 0x5f, 0x41, 0x53, 0x53, 0x45, 0x54, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, - 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x5f, 0x41, 0x53, 0x53, 0x45, - 0x54, 0x5f, 0x43, 0x4f, 0x4c, 0x4c, 0x45, 0x43, 0x54, 0x49, 0x42, 0x4c, 0x45, 0x10, 0x02, 0x32, - 0x8f, 0x10, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0e, - 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22, - 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 
0x73, 0x74, 0x1a, 0x23, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, - 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, 0x41, 0x73, 0x73, 0x65, 0x74, - 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x1d, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, - 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, - 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, 0x73, 0x73, - 0x65, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, - 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x1a, 0x1e, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, - 0x70, 0x63, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x73, - 0x73, 0x65, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1c, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, - 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x1a, 0x1f, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, - 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4c, - 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x21, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, - 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4c, 0x65, 0x61, 0x66, 0x4b, - 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x75, 0x6e, 0x69, - 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4c, 0x65, - 0x61, 0x66, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, - 0x0b, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x12, 0x0f, 0x2e, 0x75, - 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x44, 0x1a, 0x1e, 0x2e, - 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, - 0x74, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, - 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x18, 0x2e, 0x75, 0x6e, - 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, - 0x73, 0x65, 0x4b, 0x65, 0x79, 0x1a, 0x1f, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, - 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x17, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, - 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x1a, 0x1f, + 0x65, 0x73, 0x12, 0x43, 0x0a, 0x1e, 0x69, 0x73, 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6c, + 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, + 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x1b, 0x69, 0x73, 0x73, 0x75, + 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x62, 0x75, 0x72, 0x6e, 0x5f, + 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0c, 0x52, 
0x17, 0x62, 0x75, 0x72, + 0x6e, 0x4c, 0x65, 0x61, 0x66, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6c, + 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, + 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x19, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x22, 0x8e, 0x03, 0x0a, 0x15, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, + 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x10, 0x0a, 0x03, 0x74, 0x78, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x74, 0x78, + 0x6e, 0x12, 0x1c, 0x0a, 0x0a, 0x74, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x5f, 0x69, 0x64, 0x78, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x74, 0x78, 0x4f, 0x75, 0x74, 0x49, 0x64, 0x78, 0x12, + 0x21, 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4b, + 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4b, 0x65, + 0x79, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x75, 0x70, + 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x26, 0x0a, 0x0f, 0x73, + 0x75, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x6f, 0x74, + 0x53, 0x75, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 
0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x74, 0x78, 0x5f, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x74, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x19, 0x0a, 0x08, 0x74, + 0x78, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x74, + 0x78, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xd2, 0x03, 0x0a, 0x19, 0x49, 0x6e, 0x73, 0x65, 0x72, + 0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, + 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x24, + 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, + 0x79, 0x53, 0x74, 0x72, 0x12, 0x41, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, + 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 
0x44, 0x61, 0x74, 0x61, 0x52, 0x09, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, 0x19, 0x73, 0x70, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6f, 0x75, 0x74, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x61, 0x70, + 0x72, 0x70, 0x63, 0x2e, 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x17, 0x73, 0x70, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x4f, 0x75, 0x74, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0f, 0x69, 0x73, 0x73, 0x75, 0x61, 0x6e, 0x63, + 0x65, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, 0x70, + 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x69, 0x73, + 0x73, 0x75, 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0b, + 0x62, 0x75, 0x72, 0x6e, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, + 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x62, 0x75, 0x72, 0x6e, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0d, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, + 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x42, 0x0b, + 0x0a, 0x09, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x1c, 0x0a, 0x1a, 0x49, + 0x6e, 0x73, 0x65, 0x72, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 
0x6d, 0x69, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x59, 0x0a, 0x09, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x52, 0x4f, 0x4f, 0x46, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x4f, 0x46, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x53, 0x53, 0x55, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x50, + 0x52, 0x4f, 0x4f, 0x46, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x46, + 0x45, 0x52, 0x10, 0x02, 0x2a, 0x39, 0x0a, 0x10, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, + 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x59, 0x4e, 0x43, + 0x5f, 0x49, 0x53, 0x53, 0x55, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x00, + 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x01, 0x2a, + 0xd1, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x6f, + 0x72, 0x74, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x4e, 0x4f, + 0x4e, 0x45, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, + 0x41, 0x53, 0x53, 0x45, 0x54, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, + 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x41, 0x53, 0x53, 0x45, 0x54, 0x5f, 0x49, 0x44, + 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x41, 0x53, + 0x53, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x4f, + 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x59, 0x4e, 0x43, + 0x53, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x54, + 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x50, 0x52, 0x4f, 0x4f, 0x46, 0x53, 0x10, 0x05, 0x12, 0x1a, 0x0a, + 0x16, 0x53, 0x4f, 0x52, 
0x54, 0x5f, 0x42, 0x59, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x53, 0x49, 0x53, + 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x10, 0x06, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x4f, 0x52, + 0x54, 0x5f, 0x42, 0x59, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4c, + 0x59, 0x10, 0x07, 0x2a, 0x40, 0x0a, 0x0d, 0x53, 0x6f, 0x72, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x44, 0x49, 0x52, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x53, 0x43, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, + 0x53, 0x4f, 0x52, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, + 0x45, 0x53, 0x43, 0x10, 0x01, 0x2a, 0x5f, 0x0a, 0x0f, 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x15, 0x0a, 0x11, 0x46, 0x49, 0x4c, 0x54, + 0x45, 0x52, 0x5f, 0x41, 0x53, 0x53, 0x45, 0x54, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, + 0x17, 0x0a, 0x13, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x5f, 0x41, 0x53, 0x53, 0x45, 0x54, 0x5f, + 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x46, 0x49, 0x4c, 0x54, + 0x45, 0x52, 0x5f, 0x41, 0x53, 0x53, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4c, 0x4c, 0x45, 0x43, 0x54, + 0x49, 0x42, 0x4c, 0x45, 0x10, 0x02, 0x32, 0xf6, 0x10, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x76, 0x65, + 0x72, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x76, 0x65, 0x72, 0x73, + 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, + 0x72, 0x70, 0x63, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x6f, + 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x75, 0x6e, 0x69, 0x76, + 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x76, 0x65, 0x72, + 0x73, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, + 0x0a, 0x0a, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x6f, 0x6f, 
0x74, 0x73, 0x12, 0x1d, 0x2e, 0x75, + 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, + 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x75, 0x6e, + 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, + 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0f, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x1b, + 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, + 0x65, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, 0x1a, 0x1e, 0x2e, 0x75, 0x6e, + 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0f, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1c, + 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, 0x1a, 0x1f, 0x2e, 0x75, + 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, + 0x0d, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x21, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, - 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4a, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1d, 0x2e, 0x75, - 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x75, 0x6e, - 
0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x50, 0x72, - 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, - 0x63, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, - 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x12, 0x18, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, - 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, - 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, - 0x15, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x29, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, - 0x65, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2a, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, - 0x13, 0x41, 0x64, 0x64, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x12, 0x27, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, + 0x65, 0x74, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x21, 0x2e, 0x75, 0x6e, 
0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, + 0x41, 0x73, 0x73, 0x65, 0x74, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4c, 0x65, 0x61, + 0x76, 0x65, 0x73, 0x12, 0x0f, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, + 0x63, 0x2e, 0x49, 0x44, 0x1a, 0x1e, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, + 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x12, 0x18, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, + 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x1a, 0x1f, 0x2e, 0x75, + 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, + 0x0b, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x17, 0x2e, 0x75, + 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x1a, 0x1f, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, + 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x12, 0x1d, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, + 0x63, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, + 0x2e, 0x50, 0x75, 0x73, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x18, 
0x2e, 0x75, 0x6e, 0x69, + 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, + 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x43, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x12, + 0x18, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x79, + 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x75, 0x6e, 0x69, 0x76, + 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x64, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x29, 0x2e, + 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, + 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x64, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, 0x13, 0x41, 0x64, 0x64, 0x46, 0x65, 0x64, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x27, 0x2e, 0x75, 0x6e, + 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x64, 0x64, 0x46, 0x65, 0x64, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x64, 0x64, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x65, 
0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, - 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x64, 0x64, 0x46, - 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x12, 0x2a, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, - 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x0d, 0x55, 0x6e, - 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x19, 0x2e, 0x75, 0x6e, - 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, - 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, 0x73, 0x73, 0x65, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1c, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, - 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x1a, 0x1f, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, - 0x63, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x0b, 
0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, - 0x63, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, - 0x70, 0x63, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, 0x17, 0x53, 0x65, 0x74, 0x46, 0x65, 0x64, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x2b, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, - 0x53, 0x65, 0x74, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, - 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, - 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x65, 0x74, - 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, 0x19, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x75, 0x6e, 0x69, 0x76, - 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x46, 0x65, 0x64, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, - 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x46, 0x65, 0x64, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, + 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 
0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x2a, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, + 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x65, 0x64, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, + 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x46, 0x0a, 0x0d, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x12, 0x19, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, + 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0f, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x41, 0x73, 0x73, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1c, 0x2e, 0x75, + 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x1a, 0x1f, 0x2e, 0x75, 0x6e, 0x69, + 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, + 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x75, 0x6e, 0x69, + 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x75, 0x6e, + 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, + 0x76, 0x65, 0x6e, 0x74, 
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, + 0x17, 0x53, 0x65, 0x74, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, + 0x6e, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, + 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x65, 0x74, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, + 0x72, 0x70, 0x63, 0x2e, 0x53, 0x65, 0x74, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, 0x19, 0x51, 0x75, 0x65, 0x72, 0x79, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, 0x13, 0x49, 0x67, 0x6e, 0x6f, - 0x72, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, - 0x27, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x67, + 0x12, 0x2d, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, + 0x6e, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2e, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, + 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x68, 0x0a, 0x13, 0x49, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4f, 0x75, + 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x27, 0x2e, 0x75, 0x6e, 
0x69, 0x76, 0x65, 0x72, 0x73, + 0x65, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, + 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x28, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, - 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x41, 0x73, 0x73, - 0x65, 0x74, 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x65, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x26, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, - 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x27, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x11, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x25, - 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x12, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, + 0x26, 
0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, + 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x62, 0x0a, 0x11, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x25, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, - 0x11, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x76, - 0x65, 0x73, 0x12, 0x25, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, - 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x76, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x75, 0x6e, 0x69, 0x76, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x75, + 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x11, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, + 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x12, 0x25, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, - 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x42, 0x3c, 0x5a, 0x3a, 0x67, 0x69, 0x74, 
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x6c, 0x69, 0x67, 0x68, 0x74, 0x6e, 0x69, 0x6e, 0x67, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x74, 0x61, - 0x70, 0x72, 0x6f, 0x6f, 0x74, 0x2d, 0x61, 0x73, 0x73, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x61, 0x70, - 0x72, 0x70, 0x63, 0x2f, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x12, 0x49, 0x6e, 0x73, 0x65, + 0x72, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x26, + 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, + 0x65, 0x72, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, + 0x65, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, + 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, + 0x3c, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x69, + 0x67, 0x68, 0x74, 0x6e, 0x69, 0x6e, 0x67, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x74, 0x61, 0x70, 0x72, + 0x6f, 0x6f, 0x74, 0x2d, 0x61, 0x73, 0x73, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x61, 0x70, 0x72, 0x70, + 0x63, 0x2f, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x65, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -5093,7 +5555,7 @@ func file_universerpc_universe_proto_rawDescGZIP() []byte { } var file_universerpc_universe_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var 
file_universerpc_universe_proto_msgTypes = make([]protoimpl.MessageInfo, 64) +var file_universerpc_universe_proto_msgTypes = make([]protoimpl.MessageInfo, 67) var file_universerpc_universe_proto_goTypes = []any{ (ProofType)(0), // 0: universerpc.ProofType (UniverseSyncMode)(0), // 1: universerpc.UniverseSyncMode @@ -5162,140 +5624,157 @@ var file_universerpc_universe_proto_goTypes = []any{ (*SupplyLeafKey)(nil), // 64: universerpc.SupplyLeafKey (*SupplyLeafEntry)(nil), // 65: universerpc.SupplyLeafEntry (*FetchSupplyLeavesResponse)(nil), // 66: universerpc.FetchSupplyLeavesResponse - nil, // 67: universerpc.UniverseRoot.AmountsByAssetIdEntry - nil, // 68: universerpc.AssetRootResponse.UniverseRootsEntry - (*taprpc.Asset)(nil), // 69: taprpc.Asset - (*taprpc.AssetMeta)(nil), // 70: taprpc.AssetMeta - (*taprpc.GenesisReveal)(nil), // 71: taprpc.GenesisReveal - (*taprpc.GroupKeyReveal)(nil), // 72: taprpc.GroupKeyReveal - (taprpc.AssetType)(0), // 73: taprpc.AssetType - (*taprpc.AssetOutPoint)(nil), // 74: taprpc.AssetOutPoint + (*SupplyCommitChainData)(nil), // 67: universerpc.SupplyCommitChainData + (*InsertSupplyCommitRequest)(nil), // 68: universerpc.InsertSupplyCommitRequest + (*InsertSupplyCommitResponse)(nil), // 69: universerpc.InsertSupplyCommitResponse + nil, // 70: universerpc.UniverseRoot.AmountsByAssetIdEntry + nil, // 71: universerpc.AssetRootResponse.UniverseRootsEntry + (*taprpc.Asset)(nil), // 72: taprpc.Asset + (*taprpc.AssetMeta)(nil), // 73: taprpc.AssetMeta + (*taprpc.GenesisReveal)(nil), // 74: taprpc.GenesisReveal + (*taprpc.GroupKeyReveal)(nil), // 75: taprpc.GroupKeyReveal + (taprpc.AssetType)(0), // 76: taprpc.AssetType + (*taprpc.AssetOutPoint)(nil), // 77: taprpc.AssetOutPoint + (*taprpc.OutPoint)(nil), // 78: taprpc.OutPoint } var file_universerpc_universe_proto_depIdxs = []int32{ - 0, // 0: universerpc.MultiverseRootRequest.proof_type:type_name -> universerpc.ProofType - 9, // 1: universerpc.MultiverseRootRequest.specific_ids:type_name -> 
universerpc.ID - 8, // 2: universerpc.MultiverseRootResponse.multiverse_root:type_name -> universerpc.MerkleSumNode - 3, // 3: universerpc.AssetRootRequest.direction:type_name -> universerpc.SortDirection - 0, // 4: universerpc.ID.proof_type:type_name -> universerpc.ProofType - 9, // 5: universerpc.UniverseRoot.id:type_name -> universerpc.ID - 8, // 6: universerpc.UniverseRoot.mssmt_root:type_name -> universerpc.MerkleSumNode - 67, // 7: universerpc.UniverseRoot.amounts_by_asset_id:type_name -> universerpc.UniverseRoot.AmountsByAssetIdEntry - 68, // 8: universerpc.AssetRootResponse.universe_roots:type_name -> universerpc.AssetRootResponse.UniverseRootsEntry - 9, // 9: universerpc.AssetRootQuery.id:type_name -> universerpc.ID - 10, // 10: universerpc.QueryRootResponse.issuance_root:type_name -> universerpc.UniverseRoot - 10, // 11: universerpc.QueryRootResponse.transfer_root:type_name -> universerpc.UniverseRoot - 9, // 12: universerpc.DeleteRootQuery.id:type_name -> universerpc.ID - 16, // 13: universerpc.AssetKey.op:type_name -> universerpc.Outpoint - 9, // 14: universerpc.AssetLeafKeysRequest.id:type_name -> universerpc.ID - 3, // 15: universerpc.AssetLeafKeysRequest.direction:type_name -> universerpc.SortDirection - 17, // 16: universerpc.AssetLeafKeyResponse.asset_keys:type_name -> universerpc.AssetKey - 69, // 17: universerpc.AssetLeaf.asset:type_name -> taprpc.Asset - 20, // 18: universerpc.AssetLeafResponse.leaves:type_name -> universerpc.AssetLeaf - 9, // 19: universerpc.UniverseKey.id:type_name -> universerpc.ID - 17, // 20: universerpc.UniverseKey.leaf_key:type_name -> universerpc.AssetKey - 22, // 21: universerpc.AssetProofResponse.req:type_name -> universerpc.UniverseKey - 10, // 22: universerpc.AssetProofResponse.universe_root:type_name -> universerpc.UniverseRoot - 20, // 23: universerpc.AssetProofResponse.asset_leaf:type_name -> universerpc.AssetLeaf - 8, // 24: universerpc.AssetProofResponse.multiverse_root:type_name -> universerpc.MerkleSumNode - 
24, // 25: universerpc.AssetProofResponse.issuance_data:type_name -> universerpc.IssuanceData - 70, // 26: universerpc.IssuanceData.meta_reveal:type_name -> taprpc.AssetMeta - 71, // 27: universerpc.IssuanceData.genesis_reveal:type_name -> taprpc.GenesisReveal - 72, // 28: universerpc.IssuanceData.group_key_reveal:type_name -> taprpc.GroupKeyReveal - 22, // 29: universerpc.AssetProof.key:type_name -> universerpc.UniverseKey - 20, // 30: universerpc.AssetProof.asset_leaf:type_name -> universerpc.AssetLeaf - 22, // 31: universerpc.PushProofRequest.key:type_name -> universerpc.UniverseKey - 35, // 32: universerpc.PushProofRequest.server:type_name -> universerpc.UniverseFederationServer - 22, // 33: universerpc.PushProofResponse.key:type_name -> universerpc.UniverseKey - 9, // 34: universerpc.SyncTarget.id:type_name -> universerpc.ID - 1, // 35: universerpc.SyncRequest.sync_mode:type_name -> universerpc.UniverseSyncMode - 30, // 36: universerpc.SyncRequest.sync_targets:type_name -> universerpc.SyncTarget - 10, // 37: universerpc.SyncedUniverse.old_asset_root:type_name -> universerpc.UniverseRoot - 10, // 38: universerpc.SyncedUniverse.new_asset_root:type_name -> universerpc.UniverseRoot - 20, // 39: universerpc.SyncedUniverse.new_asset_leaves:type_name -> universerpc.AssetLeaf - 32, // 40: universerpc.SyncResponse.synced_universes:type_name -> universerpc.SyncedUniverse - 35, // 41: universerpc.ListFederationServersResponse.servers:type_name -> universerpc.UniverseFederationServer - 35, // 42: universerpc.AddFederationServerRequest.servers:type_name -> universerpc.UniverseFederationServer - 35, // 43: universerpc.DeleteFederationServerRequest.servers:type_name -> universerpc.UniverseFederationServer - 4, // 44: universerpc.AssetStatsQuery.asset_type_filter:type_name -> universerpc.AssetTypeFilter - 2, // 45: universerpc.AssetStatsQuery.sort_by:type_name -> universerpc.AssetQuerySort - 3, // 46: universerpc.AssetStatsQuery.direction:type_name -> 
universerpc.SortDirection - 45, // 47: universerpc.AssetStatsSnapshot.group_anchor:type_name -> universerpc.AssetStatsAsset - 45, // 48: universerpc.AssetStatsSnapshot.asset:type_name -> universerpc.AssetStatsAsset - 73, // 49: universerpc.AssetStatsAsset.asset_type:type_name -> taprpc.AssetType - 44, // 50: universerpc.UniverseAssetStats.asset_stats:type_name -> universerpc.AssetStatsSnapshot - 49, // 51: universerpc.QueryEventsResponse.events:type_name -> universerpc.GroupedUniverseEvents - 52, // 52: universerpc.SetFederationSyncConfigRequest.global_sync_configs:type_name -> universerpc.GlobalFederationSyncConfig - 53, // 53: universerpc.SetFederationSyncConfigRequest.asset_sync_configs:type_name -> universerpc.AssetFederationSyncConfig - 0, // 54: universerpc.GlobalFederationSyncConfig.proof_type:type_name -> universerpc.ProofType - 9, // 55: universerpc.AssetFederationSyncConfig.id:type_name -> universerpc.ID - 9, // 56: universerpc.QueryFederationSyncConfigRequest.id:type_name -> universerpc.ID - 52, // 57: universerpc.QueryFederationSyncConfigResponse.global_sync_configs:type_name -> universerpc.GlobalFederationSyncConfig - 53, // 58: universerpc.QueryFederationSyncConfigResponse.asset_sync_configs:type_name -> universerpc.AssetFederationSyncConfig - 74, // 59: universerpc.IgnoreAssetOutPointRequest.asset_out_point:type_name -> taprpc.AssetOutPoint - 8, // 60: universerpc.IgnoreAssetOutPointResponse.leaf:type_name -> universerpc.MerkleSumNode - 8, // 61: universerpc.SupplyCommitSubtreeRoot.root_node:type_name -> universerpc.MerkleSumNode - 8, // 62: universerpc.FetchSupplyCommitResponse.supply_commitment_root:type_name -> universerpc.MerkleSumNode - 61, // 63: universerpc.FetchSupplyCommitResponse.issuance_subtree_root:type_name -> universerpc.SupplyCommitSubtreeRoot - 61, // 64: universerpc.FetchSupplyCommitResponse.burn_subtree_root:type_name -> universerpc.SupplyCommitSubtreeRoot - 61, // 65: 
universerpc.FetchSupplyCommitResponse.ignore_subtree_root:type_name -> universerpc.SupplyCommitSubtreeRoot - 16, // 66: universerpc.SupplyLeafKey.outpoint:type_name -> universerpc.Outpoint - 64, // 67: universerpc.SupplyLeafEntry.leaf_key:type_name -> universerpc.SupplyLeafKey - 8, // 68: universerpc.SupplyLeafEntry.leaf_node:type_name -> universerpc.MerkleSumNode - 65, // 69: universerpc.FetchSupplyLeavesResponse.issuance_leaves:type_name -> universerpc.SupplyLeafEntry - 65, // 70: universerpc.FetchSupplyLeavesResponse.burn_leaves:type_name -> universerpc.SupplyLeafEntry - 65, // 71: universerpc.FetchSupplyLeavesResponse.ignore_leaves:type_name -> universerpc.SupplyLeafEntry - 10, // 72: universerpc.AssetRootResponse.UniverseRootsEntry.value:type_name -> universerpc.UniverseRoot - 5, // 73: universerpc.Universe.MultiverseRoot:input_type -> universerpc.MultiverseRootRequest - 7, // 74: universerpc.Universe.AssetRoots:input_type -> universerpc.AssetRootRequest - 12, // 75: universerpc.Universe.QueryAssetRoots:input_type -> universerpc.AssetRootQuery - 14, // 76: universerpc.Universe.DeleteAssetRoot:input_type -> universerpc.DeleteRootQuery - 18, // 77: universerpc.Universe.AssetLeafKeys:input_type -> universerpc.AssetLeafKeysRequest - 9, // 78: universerpc.Universe.AssetLeaves:input_type -> universerpc.ID - 22, // 79: universerpc.Universe.QueryProof:input_type -> universerpc.UniverseKey - 25, // 80: universerpc.Universe.InsertProof:input_type -> universerpc.AssetProof - 26, // 81: universerpc.Universe.PushProof:input_type -> universerpc.PushProofRequest - 28, // 82: universerpc.Universe.Info:input_type -> universerpc.InfoRequest - 31, // 83: universerpc.Universe.SyncUniverse:input_type -> universerpc.SyncRequest - 36, // 84: universerpc.Universe.ListFederationServers:input_type -> universerpc.ListFederationServersRequest - 38, // 85: universerpc.Universe.AddFederationServer:input_type -> universerpc.AddFederationServerRequest - 40, // 86: 
universerpc.Universe.DeleteFederationServer:input_type -> universerpc.DeleteFederationServerRequest - 33, // 87: universerpc.Universe.UniverseStats:input_type -> universerpc.StatsRequest - 43, // 88: universerpc.Universe.QueryAssetStats:input_type -> universerpc.AssetStatsQuery - 47, // 89: universerpc.Universe.QueryEvents:input_type -> universerpc.QueryEventsRequest - 50, // 90: universerpc.Universe.SetFederationSyncConfig:input_type -> universerpc.SetFederationSyncConfigRequest - 54, // 91: universerpc.Universe.QueryFederationSyncConfig:input_type -> universerpc.QueryFederationSyncConfigRequest - 56, // 92: universerpc.Universe.IgnoreAssetOutPoint:input_type -> universerpc.IgnoreAssetOutPointRequest - 58, // 93: universerpc.Universe.UpdateSupplyCommit:input_type -> universerpc.UpdateSupplyCommitRequest - 60, // 94: universerpc.Universe.FetchSupplyCommit:input_type -> universerpc.FetchSupplyCommitRequest - 63, // 95: universerpc.Universe.FetchSupplyLeaves:input_type -> universerpc.FetchSupplyLeavesRequest - 6, // 96: universerpc.Universe.MultiverseRoot:output_type -> universerpc.MultiverseRootResponse - 11, // 97: universerpc.Universe.AssetRoots:output_type -> universerpc.AssetRootResponse - 13, // 98: universerpc.Universe.QueryAssetRoots:output_type -> universerpc.QueryRootResponse - 15, // 99: universerpc.Universe.DeleteAssetRoot:output_type -> universerpc.DeleteRootResponse - 19, // 100: universerpc.Universe.AssetLeafKeys:output_type -> universerpc.AssetLeafKeyResponse - 21, // 101: universerpc.Universe.AssetLeaves:output_type -> universerpc.AssetLeafResponse - 23, // 102: universerpc.Universe.QueryProof:output_type -> universerpc.AssetProofResponse - 23, // 103: universerpc.Universe.InsertProof:output_type -> universerpc.AssetProofResponse - 27, // 104: universerpc.Universe.PushProof:output_type -> universerpc.PushProofResponse - 29, // 105: universerpc.Universe.Info:output_type -> universerpc.InfoResponse - 34, // 106: 
universerpc.Universe.SyncUniverse:output_type -> universerpc.SyncResponse - 37, // 107: universerpc.Universe.ListFederationServers:output_type -> universerpc.ListFederationServersResponse - 39, // 108: universerpc.Universe.AddFederationServer:output_type -> universerpc.AddFederationServerResponse - 41, // 109: universerpc.Universe.DeleteFederationServer:output_type -> universerpc.DeleteFederationServerResponse - 42, // 110: universerpc.Universe.UniverseStats:output_type -> universerpc.StatsResponse - 46, // 111: universerpc.Universe.QueryAssetStats:output_type -> universerpc.UniverseAssetStats - 48, // 112: universerpc.Universe.QueryEvents:output_type -> universerpc.QueryEventsResponse - 51, // 113: universerpc.Universe.SetFederationSyncConfig:output_type -> universerpc.SetFederationSyncConfigResponse - 55, // 114: universerpc.Universe.QueryFederationSyncConfig:output_type -> universerpc.QueryFederationSyncConfigResponse - 57, // 115: universerpc.Universe.IgnoreAssetOutPoint:output_type -> universerpc.IgnoreAssetOutPointResponse - 59, // 116: universerpc.Universe.UpdateSupplyCommit:output_type -> universerpc.UpdateSupplyCommitResponse - 62, // 117: universerpc.Universe.FetchSupplyCommit:output_type -> universerpc.FetchSupplyCommitResponse - 66, // 118: universerpc.Universe.FetchSupplyLeaves:output_type -> universerpc.FetchSupplyLeavesResponse - 96, // [96:119] is the sub-list for method output_type - 73, // [73:96] is the sub-list for method input_type - 73, // [73:73] is the sub-list for extension type_name - 73, // [73:73] is the sub-list for extension extendee - 0, // [0:73] is the sub-list for field type_name + 0, // 0: universerpc.MultiverseRootRequest.proof_type:type_name -> universerpc.ProofType + 9, // 1: universerpc.MultiverseRootRequest.specific_ids:type_name -> universerpc.ID + 8, // 2: universerpc.MultiverseRootResponse.multiverse_root:type_name -> universerpc.MerkleSumNode + 3, // 3: universerpc.AssetRootRequest.direction:type_name -> 
universerpc.SortDirection + 0, // 4: universerpc.ID.proof_type:type_name -> universerpc.ProofType + 9, // 5: universerpc.UniverseRoot.id:type_name -> universerpc.ID + 8, // 6: universerpc.UniverseRoot.mssmt_root:type_name -> universerpc.MerkleSumNode + 70, // 7: universerpc.UniverseRoot.amounts_by_asset_id:type_name -> universerpc.UniverseRoot.AmountsByAssetIdEntry + 71, // 8: universerpc.AssetRootResponse.universe_roots:type_name -> universerpc.AssetRootResponse.UniverseRootsEntry + 9, // 9: universerpc.AssetRootQuery.id:type_name -> universerpc.ID + 10, // 10: universerpc.QueryRootResponse.issuance_root:type_name -> universerpc.UniverseRoot + 10, // 11: universerpc.QueryRootResponse.transfer_root:type_name -> universerpc.UniverseRoot + 9, // 12: universerpc.DeleteRootQuery.id:type_name -> universerpc.ID + 16, // 13: universerpc.AssetKey.op:type_name -> universerpc.Outpoint + 9, // 14: universerpc.AssetLeafKeysRequest.id:type_name -> universerpc.ID + 3, // 15: universerpc.AssetLeafKeysRequest.direction:type_name -> universerpc.SortDirection + 17, // 16: universerpc.AssetLeafKeyResponse.asset_keys:type_name -> universerpc.AssetKey + 72, // 17: universerpc.AssetLeaf.asset:type_name -> taprpc.Asset + 20, // 18: universerpc.AssetLeafResponse.leaves:type_name -> universerpc.AssetLeaf + 9, // 19: universerpc.UniverseKey.id:type_name -> universerpc.ID + 17, // 20: universerpc.UniverseKey.leaf_key:type_name -> universerpc.AssetKey + 22, // 21: universerpc.AssetProofResponse.req:type_name -> universerpc.UniverseKey + 10, // 22: universerpc.AssetProofResponse.universe_root:type_name -> universerpc.UniverseRoot + 20, // 23: universerpc.AssetProofResponse.asset_leaf:type_name -> universerpc.AssetLeaf + 8, // 24: universerpc.AssetProofResponse.multiverse_root:type_name -> universerpc.MerkleSumNode + 24, // 25: universerpc.AssetProofResponse.issuance_data:type_name -> universerpc.IssuanceData + 73, // 26: universerpc.IssuanceData.meta_reveal:type_name -> taprpc.AssetMeta + 74, 
// 27: universerpc.IssuanceData.genesis_reveal:type_name -> taprpc.GenesisReveal + 75, // 28: universerpc.IssuanceData.group_key_reveal:type_name -> taprpc.GroupKeyReveal + 22, // 29: universerpc.AssetProof.key:type_name -> universerpc.UniverseKey + 20, // 30: universerpc.AssetProof.asset_leaf:type_name -> universerpc.AssetLeaf + 22, // 31: universerpc.PushProofRequest.key:type_name -> universerpc.UniverseKey + 35, // 32: universerpc.PushProofRequest.server:type_name -> universerpc.UniverseFederationServer + 22, // 33: universerpc.PushProofResponse.key:type_name -> universerpc.UniverseKey + 9, // 34: universerpc.SyncTarget.id:type_name -> universerpc.ID + 1, // 35: universerpc.SyncRequest.sync_mode:type_name -> universerpc.UniverseSyncMode + 30, // 36: universerpc.SyncRequest.sync_targets:type_name -> universerpc.SyncTarget + 10, // 37: universerpc.SyncedUniverse.old_asset_root:type_name -> universerpc.UniverseRoot + 10, // 38: universerpc.SyncedUniverse.new_asset_root:type_name -> universerpc.UniverseRoot + 20, // 39: universerpc.SyncedUniverse.new_asset_leaves:type_name -> universerpc.AssetLeaf + 32, // 40: universerpc.SyncResponse.synced_universes:type_name -> universerpc.SyncedUniverse + 35, // 41: universerpc.ListFederationServersResponse.servers:type_name -> universerpc.UniverseFederationServer + 35, // 42: universerpc.AddFederationServerRequest.servers:type_name -> universerpc.UniverseFederationServer + 35, // 43: universerpc.DeleteFederationServerRequest.servers:type_name -> universerpc.UniverseFederationServer + 4, // 44: universerpc.AssetStatsQuery.asset_type_filter:type_name -> universerpc.AssetTypeFilter + 2, // 45: universerpc.AssetStatsQuery.sort_by:type_name -> universerpc.AssetQuerySort + 3, // 46: universerpc.AssetStatsQuery.direction:type_name -> universerpc.SortDirection + 45, // 47: universerpc.AssetStatsSnapshot.group_anchor:type_name -> universerpc.AssetStatsAsset + 45, // 48: universerpc.AssetStatsSnapshot.asset:type_name -> 
universerpc.AssetStatsAsset + 76, // 49: universerpc.AssetStatsAsset.asset_type:type_name -> taprpc.AssetType + 44, // 50: universerpc.UniverseAssetStats.asset_stats:type_name -> universerpc.AssetStatsSnapshot + 49, // 51: universerpc.QueryEventsResponse.events:type_name -> universerpc.GroupedUniverseEvents + 52, // 52: universerpc.SetFederationSyncConfigRequest.global_sync_configs:type_name -> universerpc.GlobalFederationSyncConfig + 53, // 53: universerpc.SetFederationSyncConfigRequest.asset_sync_configs:type_name -> universerpc.AssetFederationSyncConfig + 0, // 54: universerpc.GlobalFederationSyncConfig.proof_type:type_name -> universerpc.ProofType + 9, // 55: universerpc.AssetFederationSyncConfig.id:type_name -> universerpc.ID + 9, // 56: universerpc.QueryFederationSyncConfigRequest.id:type_name -> universerpc.ID + 52, // 57: universerpc.QueryFederationSyncConfigResponse.global_sync_configs:type_name -> universerpc.GlobalFederationSyncConfig + 53, // 58: universerpc.QueryFederationSyncConfigResponse.asset_sync_configs:type_name -> universerpc.AssetFederationSyncConfig + 77, // 59: universerpc.IgnoreAssetOutPointRequest.asset_out_point:type_name -> taprpc.AssetOutPoint + 8, // 60: universerpc.IgnoreAssetOutPointResponse.leaf:type_name -> universerpc.MerkleSumNode + 78, // 61: universerpc.FetchSupplyCommitRequest.commit_outpoint:type_name -> taprpc.OutPoint + 78, // 62: universerpc.FetchSupplyCommitRequest.spent_commit_outpoint:type_name -> taprpc.OutPoint + 8, // 63: universerpc.SupplyCommitSubtreeRoot.root_node:type_name -> universerpc.MerkleSumNode + 67, // 64: universerpc.FetchSupplyCommitResponse.chain_data:type_name -> universerpc.SupplyCommitChainData + 61, // 65: universerpc.FetchSupplyCommitResponse.issuance_subtree_root:type_name -> universerpc.SupplyCommitSubtreeRoot + 61, // 66: universerpc.FetchSupplyCommitResponse.burn_subtree_root:type_name -> universerpc.SupplyCommitSubtreeRoot + 61, // 67: 
universerpc.FetchSupplyCommitResponse.ignore_subtree_root:type_name -> universerpc.SupplyCommitSubtreeRoot + 65, // 68: universerpc.FetchSupplyCommitResponse.issuance_leaves:type_name -> universerpc.SupplyLeafEntry + 65, // 69: universerpc.FetchSupplyCommitResponse.burn_leaves:type_name -> universerpc.SupplyLeafEntry + 65, // 70: universerpc.FetchSupplyCommitResponse.ignore_leaves:type_name -> universerpc.SupplyLeafEntry + 78, // 71: universerpc.FetchSupplyCommitResponse.spent_commitment_outpoint:type_name -> taprpc.OutPoint + 16, // 72: universerpc.SupplyLeafKey.outpoint:type_name -> universerpc.Outpoint + 64, // 73: universerpc.SupplyLeafEntry.leaf_key:type_name -> universerpc.SupplyLeafKey + 8, // 74: universerpc.SupplyLeafEntry.leaf_node:type_name -> universerpc.MerkleSumNode + 65, // 75: universerpc.FetchSupplyLeavesResponse.issuance_leaves:type_name -> universerpc.SupplyLeafEntry + 65, // 76: universerpc.FetchSupplyLeavesResponse.burn_leaves:type_name -> universerpc.SupplyLeafEntry + 65, // 77: universerpc.FetchSupplyLeavesResponse.ignore_leaves:type_name -> universerpc.SupplyLeafEntry + 67, // 78: universerpc.InsertSupplyCommitRequest.chain_data:type_name -> universerpc.SupplyCommitChainData + 78, // 79: universerpc.InsertSupplyCommitRequest.spent_commitment_outpoint:type_name -> taprpc.OutPoint + 65, // 80: universerpc.InsertSupplyCommitRequest.issuance_leaves:type_name -> universerpc.SupplyLeafEntry + 65, // 81: universerpc.InsertSupplyCommitRequest.burn_leaves:type_name -> universerpc.SupplyLeafEntry + 65, // 82: universerpc.InsertSupplyCommitRequest.ignore_leaves:type_name -> universerpc.SupplyLeafEntry + 10, // 83: universerpc.AssetRootResponse.UniverseRootsEntry.value:type_name -> universerpc.UniverseRoot + 5, // 84: universerpc.Universe.MultiverseRoot:input_type -> universerpc.MultiverseRootRequest + 7, // 85: universerpc.Universe.AssetRoots:input_type -> universerpc.AssetRootRequest + 12, // 86: universerpc.Universe.QueryAssetRoots:input_type -> 
universerpc.AssetRootQuery + 14, // 87: universerpc.Universe.DeleteAssetRoot:input_type -> universerpc.DeleteRootQuery + 18, // 88: universerpc.Universe.AssetLeafKeys:input_type -> universerpc.AssetLeafKeysRequest + 9, // 89: universerpc.Universe.AssetLeaves:input_type -> universerpc.ID + 22, // 90: universerpc.Universe.QueryProof:input_type -> universerpc.UniverseKey + 25, // 91: universerpc.Universe.InsertProof:input_type -> universerpc.AssetProof + 26, // 92: universerpc.Universe.PushProof:input_type -> universerpc.PushProofRequest + 28, // 93: universerpc.Universe.Info:input_type -> universerpc.InfoRequest + 31, // 94: universerpc.Universe.SyncUniverse:input_type -> universerpc.SyncRequest + 36, // 95: universerpc.Universe.ListFederationServers:input_type -> universerpc.ListFederationServersRequest + 38, // 96: universerpc.Universe.AddFederationServer:input_type -> universerpc.AddFederationServerRequest + 40, // 97: universerpc.Universe.DeleteFederationServer:input_type -> universerpc.DeleteFederationServerRequest + 33, // 98: universerpc.Universe.UniverseStats:input_type -> universerpc.StatsRequest + 43, // 99: universerpc.Universe.QueryAssetStats:input_type -> universerpc.AssetStatsQuery + 47, // 100: universerpc.Universe.QueryEvents:input_type -> universerpc.QueryEventsRequest + 50, // 101: universerpc.Universe.SetFederationSyncConfig:input_type -> universerpc.SetFederationSyncConfigRequest + 54, // 102: universerpc.Universe.QueryFederationSyncConfig:input_type -> universerpc.QueryFederationSyncConfigRequest + 56, // 103: universerpc.Universe.IgnoreAssetOutPoint:input_type -> universerpc.IgnoreAssetOutPointRequest + 58, // 104: universerpc.Universe.UpdateSupplyCommit:input_type -> universerpc.UpdateSupplyCommitRequest + 60, // 105: universerpc.Universe.FetchSupplyCommit:input_type -> universerpc.FetchSupplyCommitRequest + 63, // 106: universerpc.Universe.FetchSupplyLeaves:input_type -> universerpc.FetchSupplyLeavesRequest + 68, // 107: 
universerpc.Universe.InsertSupplyCommit:input_type -> universerpc.InsertSupplyCommitRequest + 6, // 108: universerpc.Universe.MultiverseRoot:output_type -> universerpc.MultiverseRootResponse + 11, // 109: universerpc.Universe.AssetRoots:output_type -> universerpc.AssetRootResponse + 13, // 110: universerpc.Universe.QueryAssetRoots:output_type -> universerpc.QueryRootResponse + 15, // 111: universerpc.Universe.DeleteAssetRoot:output_type -> universerpc.DeleteRootResponse + 19, // 112: universerpc.Universe.AssetLeafKeys:output_type -> universerpc.AssetLeafKeyResponse + 21, // 113: universerpc.Universe.AssetLeaves:output_type -> universerpc.AssetLeafResponse + 23, // 114: universerpc.Universe.QueryProof:output_type -> universerpc.AssetProofResponse + 23, // 115: universerpc.Universe.InsertProof:output_type -> universerpc.AssetProofResponse + 27, // 116: universerpc.Universe.PushProof:output_type -> universerpc.PushProofResponse + 29, // 117: universerpc.Universe.Info:output_type -> universerpc.InfoResponse + 34, // 118: universerpc.Universe.SyncUniverse:output_type -> universerpc.SyncResponse + 37, // 119: universerpc.Universe.ListFederationServers:output_type -> universerpc.ListFederationServersResponse + 39, // 120: universerpc.Universe.AddFederationServer:output_type -> universerpc.AddFederationServerResponse + 41, // 121: universerpc.Universe.DeleteFederationServer:output_type -> universerpc.DeleteFederationServerResponse + 42, // 122: universerpc.Universe.UniverseStats:output_type -> universerpc.StatsResponse + 46, // 123: universerpc.Universe.QueryAssetStats:output_type -> universerpc.UniverseAssetStats + 48, // 124: universerpc.Universe.QueryEvents:output_type -> universerpc.QueryEventsResponse + 51, // 125: universerpc.Universe.SetFederationSyncConfig:output_type -> universerpc.SetFederationSyncConfigResponse + 55, // 126: universerpc.Universe.QueryFederationSyncConfig:output_type -> universerpc.QueryFederationSyncConfigResponse + 57, // 127: 
universerpc.Universe.IgnoreAssetOutPoint:output_type -> universerpc.IgnoreAssetOutPointResponse + 59, // 128: universerpc.Universe.UpdateSupplyCommit:output_type -> universerpc.UpdateSupplyCommitResponse + 62, // 129: universerpc.Universe.FetchSupplyCommit:output_type -> universerpc.FetchSupplyCommitResponse + 66, // 130: universerpc.Universe.FetchSupplyLeaves:output_type -> universerpc.FetchSupplyLeavesResponse + 69, // 131: universerpc.Universe.InsertSupplyCommit:output_type -> universerpc.InsertSupplyCommitResponse + 108, // [108:132] is the sub-list for method output_type + 84, // [84:108] is the sub-list for method input_type + 84, // [84:84] is the sub-list for extension type_name + 84, // [84:84] is the sub-list for extension extendee + 0, // [0:84] is the sub-list for field type_name } func init() { file_universerpc_universe_proto_init() } @@ -6048,6 +6527,42 @@ func file_universerpc_universe_proto_init() { return nil } } + file_universerpc_universe_proto_msgTypes[62].Exporter = func(v any, i int) any { + switch v := v.(*SupplyCommitChainData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_universerpc_universe_proto_msgTypes[63].Exporter = func(v any, i int) any { + switch v := v.(*InsertSupplyCommitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_universerpc_universe_proto_msgTypes[64].Exporter = func(v any, i int) any { + switch v := v.(*InsertSupplyCommitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_universerpc_universe_proto_msgTypes[4].OneofWrappers = []any{ (*ID_AssetId)(nil), @@ -6068,18 +6583,25 @@ func file_universerpc_universe_proto_init() { file_universerpc_universe_proto_msgTypes[55].OneofWrappers = []any{ (*FetchSupplyCommitRequest_GroupKeyBytes)(nil), 
(*FetchSupplyCommitRequest_GroupKeyStr)(nil), + (*FetchSupplyCommitRequest_CommitOutpoint)(nil), + (*FetchSupplyCommitRequest_SpentCommitOutpoint)(nil), + (*FetchSupplyCommitRequest_VeryFirst)(nil), } file_universerpc_universe_proto_msgTypes[58].OneofWrappers = []any{ (*FetchSupplyLeavesRequest_GroupKeyBytes)(nil), (*FetchSupplyLeavesRequest_GroupKeyStr)(nil), } + file_universerpc_universe_proto_msgTypes[63].OneofWrappers = []any{ + (*InsertSupplyCommitRequest_GroupKeyBytes)(nil), + (*InsertSupplyCommitRequest_GroupKeyStr)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_universerpc_universe_proto_rawDesc, NumEnums: 5, - NumMessages: 64, + NumMessages: 67, NumExtensions: 0, NumServices: 1, }, diff --git a/taprpc/universerpc/universe.pb.gw.go b/taprpc/universerpc/universe.pb.gw.go index a31d58d95..c0cdfbe49 100644 --- a/taprpc/universerpc/universe.pb.gw.go +++ b/taprpc/universerpc/universe.pb.gw.go @@ -1841,6 +1841,76 @@ func local_request_Universe_FetchSupplyLeaves_0(ctx context.Context, marshaler r } +func request_Universe_InsertSupplyCommit_0(ctx context.Context, marshaler runtime.Marshaler, client UniverseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq InsertSupplyCommitRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["group_key_str"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "group_key_str") + } + + if protoReq.GroupKey == nil { + protoReq.GroupKey = &InsertSupplyCommitRequest_GroupKeyStr{} + } else if _, ok := protoReq.GroupKey.(*InsertSupplyCommitRequest_GroupKeyStr); !ok { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "expect type: *InsertSupplyCommitRequest_GroupKeyStr, but: %t\n", protoReq.GroupKey) + } + protoReq.GroupKey.(*InsertSupplyCommitRequest_GroupKeyStr).GroupKeyStr, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "group_key_str", err) + } + + msg, err := client.InsertSupplyCommit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Universe_InsertSupplyCommit_0(ctx context.Context, marshaler runtime.Marshaler, server UniverseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq InsertSupplyCommitRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["group_key_str"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "group_key_str") + } + + if protoReq.GroupKey == nil { + protoReq.GroupKey = &InsertSupplyCommitRequest_GroupKeyStr{} + } else if _, ok := protoReq.GroupKey.(*InsertSupplyCommitRequest_GroupKeyStr); !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "expect type: *InsertSupplyCommitRequest_GroupKeyStr, but: %t\n", protoReq.GroupKey) + } + protoReq.GroupKey.(*InsertSupplyCommitRequest_GroupKeyStr).GroupKeyStr, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "group_key_str", err) + } + + msg, err := server.InsertSupplyCommit(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterUniverseHandlerServer registers the http handlers for service Universe to "mux". 
// UnaryRPC :call UniverseServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. @@ -2505,7 +2575,7 @@ func RegisterUniverseHandlerServer(ctx context.Context, mux *runtime.ServeMux, s inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) var err error var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/universerpc.Universe/UpdateSupplyCommit", runtime.WithHTTPPathPattern("/v1/taproot-assets/universe/supply/{group_key_str}")) + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/universerpc.Universe/UpdateSupplyCommit", runtime.WithHTTPPathPattern("/v1/taproot-assets/universe/supply/update/{group_key_str}")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -2572,6 +2642,31 @@ func RegisterUniverseHandlerServer(ctx context.Context, mux *runtime.ServeMux, s }) + mux.Handle("POST", pattern_Universe_InsertSupplyCommit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/universerpc.Universe/InsertSupplyCommit", runtime.WithHTTPPathPattern("/v1/taproot-assets/universe/supply/{group_key_str}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Universe_InsertSupplyCommit_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = 
runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_Universe_InsertSupplyCommit_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } @@ -3191,7 +3286,7 @@ func RegisterUniverseHandlerClient(ctx context.Context, mux *runtime.ServeMux, c inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) var err error var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/universerpc.Universe/UpdateSupplyCommit", runtime.WithHTTPPathPattern("/v1/taproot-assets/universe/supply/{group_key_str}")) + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/universerpc.Universe/UpdateSupplyCommit", runtime.WithHTTPPathPattern("/v1/taproot-assets/universe/supply/update/{group_key_str}")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -3251,6 +3346,28 @@ func RegisterUniverseHandlerClient(ctx context.Context, mux *runtime.ServeMux, c }) + mux.Handle("POST", pattern_Universe_InsertSupplyCommit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/universerpc.Universe/InsertSupplyCommit", runtime.WithHTTPPathPattern("/v1/taproot-assets/universe/supply/{group_key_str}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Universe_InsertSupplyCommit_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + 
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_Universe_InsertSupplyCommit_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } @@ -3307,11 +3424,13 @@ var ( pattern_Universe_IgnoreAssetOutPoint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"v1", "taproot-assets", "universe", "supply", "ignore"}, "")) - pattern_Universe_UpdateSupplyCommit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"v1", "taproot-assets", "universe", "supply", "group_key_str"}, "")) + pattern_Universe_UpdateSupplyCommit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"v1", "taproot-assets", "universe", "supply", "update", "group_key_str"}, "")) pattern_Universe_FetchSupplyCommit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"v1", "taproot-assets", "universe", "supply", "group_key_str"}, "")) pattern_Universe_FetchSupplyLeaves_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"v1", "taproot-assets", "universe", "supply", "leaves", "group_key_str"}, "")) + + pattern_Universe_InsertSupplyCommit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"v1", "taproot-assets", "universe", "supply", "group_key_str"}, "")) ) var ( @@ -3372,4 +3491,6 @@ var ( forward_Universe_FetchSupplyCommit_0 = runtime.ForwardResponseMessage forward_Universe_FetchSupplyLeaves_0 = runtime.ForwardResponseMessage + + forward_Universe_InsertSupplyCommit_0 = runtime.ForwardResponseMessage ) diff --git a/taprpc/universerpc/universe.pb.json.go b/taprpc/universerpc/universe.pb.json.go index ceae8733e..e4f762173 100644 --- a/taprpc/universerpc/universe.pb.json.go +++ 
b/taprpc/universerpc/universe.pb.json.go @@ -595,4 +595,29 @@ func RegisterUniverseJSONCallbacks(registry map[string]func(ctx context.Context, } callback(string(respBytes), nil) } + + registry["universerpc.Universe.InsertSupplyCommit"] = func(ctx context.Context, + conn *grpc.ClientConn, reqJSON string, callback func(string, error)) { + + req := &InsertSupplyCommitRequest{} + err := marshaler.Unmarshal([]byte(reqJSON), req) + if err != nil { + callback("", err) + return + } + + client := NewUniverseClient(conn) + resp, err := client.InsertSupplyCommit(ctx, req) + if err != nil { + callback("", err) + return + } + + respBytes, err := marshaler.Marshal(resp) + if err != nil { + callback("", err) + return + } + callback(string(respBytes), nil) + } } diff --git a/taprpc/universerpc/universe.proto b/taprpc/universerpc/universe.proto index 3cb8103e9..cdbc835f7 100644 --- a/taprpc/universerpc/universe.proto +++ b/taprpc/universerpc/universe.proto @@ -187,6 +187,14 @@ service Universe { */ rpc FetchSupplyLeaves (FetchSupplyLeavesRequest) returns (FetchSupplyLeavesResponse); + + /* tapcli: `universe supplycommit insert` + InsertSupplyCommit inserts a supply commitment for a specific asset + group. This includes the commitment details, supply leaves (issuance, burn, + and ignore), and chain proof that proves the commitment has been mined. + */ + rpc InsertSupplyCommit (InsertSupplyCommitRequest) + returns (InsertSupplyCommitResponse); } message MultiverseRootRequest { @@ -796,17 +804,25 @@ message FetchSupplyCommitRequest { string group_key_str = 2; } - // Optional: A list of issuance leaf keys. For each key in this list, - // the endpoint will generate and return an inclusion proof. - repeated bytes issuance_leaf_keys = 3; - - // Optional: A list of burn leaf keys. For each key in this list, - // the endpoint will generate and return an inclusion proof. - repeated bytes burn_leaf_keys = 4; - - // Optional: A list of ignore leaf keys. 
For each key in this list, the - // endpoint will generate and return an inclusion proof. - repeated bytes ignore_leaf_keys = 5; + // Specifies which supply commit to fetch. + oneof locator { + // Fetch the supply commitment that created this new commitment + // output on chain. + taprpc.OutPoint commit_outpoint = 3; + + // Fetch the supply commitment that spent the specified commitment + // output on chain to create a new supply commitment. This can be used + // to traverse the chain of supply commitments by watching the spend of + // the commitment output. + taprpc.OutPoint spent_commit_outpoint = 4; + + // Fetch the very first supply commitment for the asset group. This + // returns the initial supply commitment that spent the pre-commitment + // output of the very first asset mint of a grouped asset (also known + // as the group anchor). This is useful as the starting point to fetch + // all supply commitments for a grouped asset one by one. + bool very_first = 5; + } } message SupplyCommitSubtreeRoot { @@ -825,57 +841,46 @@ message SupplyCommitSubtreeRoot { } message FetchSupplyCommitResponse { - // The supply commitment merkle sum root node for the specified asset. - MerkleSumNode supply_commitment_root = 1; - - // The txid of the anchor transaction that commits to the supply - // commitment for the specified asset. - string anchor_txid = 2; - - // The output index of the anchor transaction that commits to the supply - // commitment for the specified asset. - uint32 anchor_tx_out_idx = 3; - - // The transaction output taproot internal key of the anchor transaction - // that commits to the supply commitment for the specified asset. - bytes anchor_tx_out_internal_key = 4; - - // The height of the block at which the supply commitment was anchored. - uint32 block_height = 5; - - // The hash of the block at which the supply commitment was anchored. 
- bytes block_hash = 6; - - // The index of the transaction in the block that commits to the supply - // commitment. - uint32 block_tx_index = 7; + // The supply commitment chain data that contains both the commitment and + // chain proof information. + SupplyCommitChainData chain_data = 1; // The total number of satoshis in on-chain fees paid by the supply // commitment transaction. - int64 tx_chain_fees_sats = 8; + int64 tx_chain_fees_sats = 2; // The root of the issuance tree for the specified asset. - SupplyCommitSubtreeRoot issuance_subtree_root = 9; + SupplyCommitSubtreeRoot issuance_subtree_root = 3; // The root of the burn tree for the specified asset. - SupplyCommitSubtreeRoot burn_subtree_root = 10; + SupplyCommitSubtreeRoot burn_subtree_root = 4; // The root of the ignore tree for the specified asset. - SupplyCommitSubtreeRoot ignore_subtree_root = 11; + SupplyCommitSubtreeRoot ignore_subtree_root = 5; - // Inclusion proofs for each issuance leaf key provided in the request. - // Each entry corresponds to the key at the same index in - // `issuance_leaf_keys`. - repeated bytes issuance_leaf_inclusion_proofs = 12; + // The issuance leaves that were added by this supply commitment. Does not + // include leaves that were already present in the issuance subtree before + // the block height at which this supply commitment was anchored. + repeated SupplyLeafEntry issuance_leaves = 6; - // Inclusion proofs for each burn leaf key provided in the request. - // Each entry corresponds to the key at the same index in `burn_leaf_keys`. - repeated bytes burn_leaf_inclusion_proofs = 13; + // The burn leaves that were added by this supply commitment. Does not + // include leaves that were already present in the burn subtree before + // the block height at which this supply commitment was anchored. + repeated SupplyLeafEntry burn_leaves = 7; - // Inclusion proofs for each ignored leaf key provided in the request. 
- // Each entry corresponds to the key at the same index in - // `ignore_leaf_keys`. - repeated bytes ignore_leaf_inclusion_proofs = 14; + // The ignore leaves that were added by this supply commitment. Does not + // include leaves that were already present in the ignore subtree before + // the block height at which this supply commitment was anchored. + repeated SupplyLeafEntry ignore_leaves = 8; + + // The total outstanding supply of the asset after applying all the supply + // changes (issuance, burn, ignore) included in this supply commitment. + uint64 total_outstanding_supply = 9; + + // The outpoint of the previous commitment that this new commitment is + // spending. This must be set unless this is the very first supply + // commitment of a grouped asset. + taprpc.OutPoint spent_commitment_outpoint = 10; } message FetchSupplyLeavesRequest { @@ -895,6 +900,18 @@ message FetchSupplyLeavesRequest { // The end block height for the range of supply leaves to fetch. uint32 block_height_end = 4; + + // Optional: A list of issuance leaf keys. For each key in this list, + // the endpoint will generate and return an inclusion proof. + repeated bytes issuance_leaf_keys = 5; + + // Optional: A list of burn leaf keys. For each key in this list, + // the endpoint will generate and return an inclusion proof. + repeated bytes burn_leaf_keys = 6; + + // Optional: A list of ignore leaf keys. For each key in this list, the + // endpoint will generate and return an inclusion proof. + repeated bytes ignore_leaf_keys = 7; } // SupplyLeafKey identifies a supply leaf entry. It contains the components @@ -931,4 +948,91 @@ message FetchSupplyLeavesResponse { repeated SupplyLeafEntry issuance_leaves = 1; repeated SupplyLeafEntry burn_leaves = 2; repeated SupplyLeafEntry ignore_leaves = 3; + + // Inclusion proofs for each issuance leaf key provided in the request. + // Each entry corresponds to the key at the same index in + // `issuance_leaf_keys`. 
+ repeated bytes issuance_leaf_inclusion_proofs = 4; + + // Inclusion proofs for each burn leaf key provided in the request. + // Each entry corresponds to the key at the same index in `burn_leaf_keys`. + repeated bytes burn_leaf_inclusion_proofs = 5; + + // Inclusion proofs for each ignored leaf key provided in the request. + // Each entry corresponds to the key at the same index in + // `ignore_leaf_keys`. + repeated bytes ignore_leaf_inclusion_proofs = 6; +} + +// SupplyCommitChainData represents the on-chain artifacts for a supply +// commitment update. +message SupplyCommitChainData { + // The raw transaction that created the root commitment. + bytes txn = 1; + + // The index of the output in the transaction where the commitment resides. + uint32 tx_out_idx = 2; + + // The internal key used to create the commitment output. + bytes internal_key = 3; + + // The taproot output key used to create the commitment output. + bytes output_key = 4; + + // The root hash of the supply tree that contains the set of + // sub-commitments. The sum value of this tree is the outstanding supply + // value. + bytes supply_root_hash = 5; + + // The sum value of the supply root tree, representing the outstanding + // supply amount. + uint64 supply_root_sum = 6; + + // The block header of the block that contains the supply commitment + // transaction. + bytes block_header = 7; + + // The hash of the block that contains the commitment. + bytes block_hash = 8; + + // The block height of the block that contains the supply commitment + // transaction. + uint32 block_height = 9; + + // The merkle proof that proves that the supply commitment transaction is + // included in the block. + bytes tx_block_merkle_proof = 10; + + // The index of the supply commitment transaction in the block. + uint32 tx_index = 11; +} + +message InsertSupplyCommitRequest { + // The unique identifier for the target asset group whose supply commitment + // is being inserted. 
+ oneof group_key { + // The 32-byte asset group key specified as raw bytes (gRPC only). + bytes group_key_bytes = 1; + + // The 32-byte asset group key encoded as hex string (use this for + // REST). + string group_key_str = 2; + } + + // The supply commitment chain data that contains both the commitment and + // chain proof information. + SupplyCommitChainData chain_data = 3; + + // The outpoint of the previous commitment that this new commitment is + // spending. This must be set unless this is the very first supply + // commitment of a grouped asset. + taprpc.OutPoint spent_commitment_outpoint = 4; + + // The supply leaves that represent the supply changes for the asset group. + repeated SupplyLeafEntry issuance_leaves = 5; + repeated SupplyLeafEntry burn_leaves = 6; + repeated SupplyLeafEntry ignore_leaves = 7; +} + +message InsertSupplyCommitResponse { } \ No newline at end of file diff --git a/taprpc/universerpc/universe.swagger.json b/taprpc/universerpc/universe.swagger.json index ddb0a2b17..85c6a6362 100644 --- a/taprpc/universerpc/universe.swagger.json +++ b/taprpc/universerpc/universe.swagger.json @@ -1423,6 +1423,82 @@ "required": false, "type": "integer", "format": "int64" + }, + { + "name": "issuance_leaf_keys", + "description": "Optional: A list of issuance leaf keys. For each key in this list,\nthe endpoint will generate and return an inclusion proof.", + "in": "query", + "required": false, + "type": "array", + "items": { + "type": "string", + "format": "byte" + }, + "collectionFormat": "multi" + }, + { + "name": "burn_leaf_keys", + "description": "Optional: A list of burn leaf keys. For each key in this list,\nthe endpoint will generate and return an inclusion proof.", + "in": "query", + "required": false, + "type": "array", + "items": { + "type": "string", + "format": "byte" + }, + "collectionFormat": "multi" + }, + { + "name": "ignore_leaf_keys", + "description": "Optional: A list of ignore leaf keys. 
For each key in this list, the\nendpoint will generate and return an inclusion proof.", + "in": "query", + "required": false, + "type": "array", + "items": { + "type": "string", + "format": "byte" + }, + "collectionFormat": "multi" + } + ], + "tags": [ + "Universe" + ] + } + }, + "/v1/taproot-assets/universe/supply/update/{group_key_str}": { + "post": { + "summary": "tapcli: `universe updatesupplycommit`\nUpdateSupplyCommit updates the on-chain supply commitment for a specific\nasset group.", + "operationId": "Universe_UpdateSupplyCommit", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/universerpcUpdateSupplyCommitResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "group_key_str", + "description": "The 32-byte asset group key encoded as hex string (use this for\nREST).", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/UniverseUpdateSupplyCommitBody" + } } ], "tags": [ @@ -1465,40 +1541,43 @@ "format": "byte" }, { - "name": "issuance_leaf_keys", - "description": "Optional: A list of issuance leaf keys. For each key in this list,\nthe endpoint will generate and return an inclusion proof.", + "name": "commit_outpoint.txid", + "description": "Raw bytes representing the transaction id.", "in": "query", "required": false, - "type": "array", - "items": { - "type": "string", - "format": "byte" - }, - "collectionFormat": "multi" + "type": "string", + "format": "byte" }, { - "name": "burn_leaf_keys", - "description": "Optional: A list of burn leaf keys. 
For each key in this list,\nthe endpoint will generate and return an inclusion proof.", + "name": "commit_outpoint.output_index", + "description": "The index of the output on the transaction.", "in": "query", "required": false, - "type": "array", - "items": { - "type": "string", - "format": "byte" - }, - "collectionFormat": "multi" + "type": "integer", + "format": "int64" }, { - "name": "ignore_leaf_keys", - "description": "Optional: A list of ignore leaf keys. For each key in this list, the\nendpoint will generate and return an inclusion proof.", + "name": "spent_commit_outpoint.txid", + "description": "Raw bytes representing the transaction id.", "in": "query", "required": false, - "type": "array", - "items": { - "type": "string", - "format": "byte" - }, - "collectionFormat": "multi" + "type": "string", + "format": "byte" + }, + { + "name": "spent_commit_outpoint.output_index", + "description": "The index of the output on the transaction.", + "in": "query", + "required": false, + "type": "integer", + "format": "int64" + }, + { + "name": "very_first", + "description": "Fetch the very first supply commitment for the asset group. This\nreturns the initial supply commitment that spent the pre-commitment\noutput of the very first asset mint of a grouped asset (also known\nas the group anchor). This is useful as the starting point to fetch\nall supply commitments for a grouped asset one by one.", + "in": "query", + "required": false, + "type": "boolean" } ], "tags": [ @@ -1506,13 +1585,13 @@ ] }, "post": { - "summary": "tapcli: `universe updatesupplycommit`\nUpdateSupplyCommit updates the on-chain supply commitment for a specific\nasset group.", - "operationId": "Universe_UpdateSupplyCommit", + "summary": "tapcli: `universe supplycommit insert`\nInsertSupplyCommit inserts a supply commitment for a specific asset\ngroup. 
This includes the commitment details, supply leaves (issuance, burn,\nand ignore), and chain proof that proves the commitment has been mined.", + "operationId": "Universe_InsertSupplyCommit", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/universerpcUpdateSupplyCommitResponse" + "$ref": "#/definitions/universerpcInsertSupplyCommitResponse" } }, "default": { @@ -1535,7 +1614,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/UniverseUpdateSupplyCommitBody" + "$ref": "#/definitions/UniverseInsertSupplyCommitBody" } } ], @@ -1690,6 +1769,46 @@ } } }, + "UniverseInsertSupplyCommitBody": { + "type": "object", + "properties": { + "group_key_bytes": { + "type": "string", + "format": "byte", + "description": "The 32-byte asset group key specified as raw bytes (gRPC only)." + }, + "chain_data": { + "$ref": "#/definitions/universerpcSupplyCommitChainData", + "description": "The supply commitment chain data that contains both the commitment and\nchain proof information." + }, + "spent_commitment_outpoint": { + "$ref": "#/definitions/taprpcOutPoint", + "description": "The outpoint of the previous commitment that this new commitment is\nspending. This must be set unless this is the very first supply\ncommitment of a grouped asset." + }, + "issuance_leaves": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/universerpcSupplyLeafEntry" + }, + "description": "The supply leaves that represent the supply changes for the asset group." 
+ }, + "burn_leaves": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/universerpcSupplyLeafEntry" + } + }, + "ignore_leaves": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/universerpcSupplyLeafEntry" + } + } + } + }, "UniversePushProofBody": { "type": "object", "properties": { @@ -2077,6 +2196,22 @@ } } }, + "taprpcOutPoint": { + "type": "object", + "properties": { + "txid": { + "type": "string", + "format": "byte", + "description": "Raw bytes representing the transaction id." + }, + "output_index": { + "type": "integer", + "format": "int64", + "description": "The index of the output on the transaction." + } + }, + "description": "Represents a Bitcoin transaction outpoint." + }, "taprpcPrevInputAsset": { "type": "object", "properties": { @@ -2395,38 +2530,9 @@ "universerpcFetchSupplyCommitResponse": { "type": "object", "properties": { - "supply_commitment_root": { - "$ref": "#/definitions/universerpcMerkleSumNode", - "description": "The supply commitment merkle sum root node for the specified asset." - }, - "anchor_txid": { - "type": "string", - "description": "The txid of the anchor transaction that commits to the supply\ncommitment for the specified asset." - }, - "anchor_tx_out_idx": { - "type": "integer", - "format": "int64", - "description": "The output index of the anchor transaction that commits to the supply\ncommitment for the specified asset." - }, - "anchor_tx_out_internal_key": { - "type": "string", - "format": "byte", - "description": "The transaction output taproot internal key of the anchor transaction\nthat commits to the supply commitment for the specified asset." - }, - "block_height": { - "type": "integer", - "format": "int64", - "description": "The height of the block at which the supply commitment was anchored." - }, - "block_hash": { - "type": "string", - "format": "byte", - "description": "The hash of the block at which the supply commitment was anchored." 
- }, - "block_tx_index": { - "type": "integer", - "format": "int64", - "description": "The index of the transaction in the block that commits to the supply\ncommitment." + "chain_data": { + "$ref": "#/definitions/universerpcSupplyCommitChainData", + "description": "The supply commitment chain data that contains both the commitment and\nchain proof information." }, "tx_chain_fees_sats": { "type": "string", @@ -2445,29 +2551,38 @@ "$ref": "#/definitions/universerpcSupplyCommitSubtreeRoot", "description": "The root of the ignore tree for the specified asset." }, - "issuance_leaf_inclusion_proofs": { + "issuance_leaves": { "type": "array", "items": { - "type": "string", - "format": "byte" + "type": "object", + "$ref": "#/definitions/universerpcSupplyLeafEntry" }, - "description": "Inclusion proofs for each issuance leaf key provided in the request.\nEach entry corresponds to the key at the same index in\n`issuance_leaf_keys`." + "description": "The issuance leaves that were added by this supply commitment. Does not\ninclude leaves that were already present in the issuance subtree before\nthe block height at which this supply commitment was anchored." }, - "burn_leaf_inclusion_proofs": { + "burn_leaves": { "type": "array", "items": { - "type": "string", - "format": "byte" + "type": "object", + "$ref": "#/definitions/universerpcSupplyLeafEntry" }, - "description": "Inclusion proofs for each burn leaf key provided in the request.\nEach entry corresponds to the key at the same index in `burn_leaf_keys`." + "description": "The burn leaves that were added by this supply commitment. Does not\ninclude leaves that were already present in the burn subtree before\nthe block height at which this supply commitment was anchored." 
}, - "ignore_leaf_inclusion_proofs": { + "ignore_leaves": { "type": "array", "items": { - "type": "string", - "format": "byte" + "type": "object", + "$ref": "#/definitions/universerpcSupplyLeafEntry" }, - "description": "Inclusion proofs for each ignored leaf key provided in the request.\nEach entry corresponds to the key at the same index in\n`ignore_leaf_keys`." + "description": "The ignore leaves that were added by this supply commitment. Does not\ninclude leaves that were already present in the ignore subtree before\nthe block height at which this supply commitment was anchored." + }, + "total_outstanding_supply": { + "type": "string", + "format": "uint64", + "description": "The total outstanding supply of the asset after applying all the supply\nchanges (issuance, burn, ignore) included in this supply commitment." + }, + "spent_commitment_outpoint": { + "$ref": "#/definitions/taprpcOutPoint", + "description": "The outpoint of the previous commitment that this new commitment is\nspending. This must be set unless this is the very first supply\ncommitment of a grouped asset." } } }, @@ -2494,6 +2609,30 @@ "type": "object", "$ref": "#/definitions/universerpcSupplyLeafEntry" } + }, + "issuance_leaf_inclusion_proofs": { + "type": "array", + "items": { + "type": "string", + "format": "byte" + }, + "description": "Inclusion proofs for each issuance leaf key provided in the request.\nEach entry corresponds to the key at the same index in\n`issuance_leaf_keys`." + }, + "burn_leaf_inclusion_proofs": { + "type": "array", + "items": { + "type": "string", + "format": "byte" + }, + "description": "Inclusion proofs for each burn leaf key provided in the request.\nEach entry corresponds to the key at the same index in `burn_leaf_keys`." 
+ }, + "ignore_leaf_inclusion_proofs": { + "type": "array", + "items": { + "type": "string", + "format": "byte" + }, + "description": "Inclusion proofs for each ignored leaf key provided in the request.\nEach entry corresponds to the key at the same index in\n`ignore_leaf_keys`." } } }, @@ -2598,6 +2737,9 @@ } } }, + "universerpcInsertSupplyCommitResponse": { + "type": "object" + }, "universerpcIssuanceData": { "type": "object", "properties": { @@ -2801,6 +2943,67 @@ } } }, + "universerpcSupplyCommitChainData": { + "type": "object", + "properties": { + "txn": { + "type": "string", + "format": "byte", + "description": "The raw transaction that created the root commitment." + }, + "tx_out_idx": { + "type": "integer", + "format": "int64", + "description": "The index of the output in the transaction where the commitment resides." + }, + "internal_key": { + "type": "string", + "format": "byte", + "description": "The internal key used to create the commitment output." + }, + "output_key": { + "type": "string", + "format": "byte", + "description": "The taproot output key used to create the commitment output." + }, + "supply_root_hash": { + "type": "string", + "format": "byte", + "description": "The root hash of the supply tree that contains the set of\nsub-commitments. The sum value of this tree is the outstanding supply\nvalue." + }, + "supply_root_sum": { + "type": "string", + "format": "uint64", + "description": "The sum value of the supply root tree, representing the outstanding\nsupply amount." + }, + "block_header": { + "type": "string", + "format": "byte", + "description": "The block header of the block that contains the supply commitment\ntransaction." + }, + "block_hash": { + "type": "string", + "format": "byte", + "description": "The hash of the block that contains the commitment." + }, + "block_height": { + "type": "integer", + "format": "int64", + "description": "The block height of the block that contains the supply commitment\ntransaction." 
+ }, + "tx_block_merkle_proof": { + "type": "string", + "format": "byte", + "description": "The merkle proof that proves that the supply commitment transaction is\nincluded in the block." + }, + "tx_index": { + "type": "integer", + "format": "int64", + "description": "The index of the supply commitment transaction in the block." + } + }, + "description": "SupplyCommitChainData represents the on-chain artifacts for a supply\ncommitment update." + }, "universerpcSupplyCommitSubtreeRoot": { "type": "object", "properties": { diff --git a/taprpc/universerpc/universe.yaml b/taprpc/universerpc/universe.yaml index 2edbefb44..e7518f5f2 100644 --- a/taprpc/universerpc/universe.yaml +++ b/taprpc/universerpc/universe.yaml @@ -85,6 +85,10 @@ http: body: "*" - selector: universerpc.Universe.UpdateSupplyCommit + post: "/v1/taproot-assets/universe/supply/update/{group_key_str}" + body: "*" + + - selector: universerpc.Universe.InsertSupplyCommit post: "/v1/taproot-assets/universe/supply/{group_key_str}" body: "*" diff --git a/taprpc/universerpc/universe_grpc.pb.go b/taprpc/universerpc/universe_grpc.pb.go index 46970afcb..995b10ca4 100644 --- a/taprpc/universerpc/universe_grpc.pb.go +++ b/taprpc/universerpc/universe_grpc.pb.go @@ -135,6 +135,11 @@ type UniverseClient interface { // within a specified block height range. The leaves include issuance, burn, // and ignore leaves, which represent the supply changes for the asset group. FetchSupplyLeaves(ctx context.Context, in *FetchSupplyLeavesRequest, opts ...grpc.CallOption) (*FetchSupplyLeavesResponse, error) + // tapcli: `universe supplycommit insert` + // InsertSupplyCommit inserts a supply commitment for a specific asset + // group. This includes the commitment details, supply leaves (issuance, burn, + // and ignore), and chain proof that proves the commitment has been mined. 
+ InsertSupplyCommit(ctx context.Context, in *InsertSupplyCommitRequest, opts ...grpc.CallOption) (*InsertSupplyCommitResponse, error) } type universeClient struct { @@ -352,6 +357,15 @@ func (c *universeClient) FetchSupplyLeaves(ctx context.Context, in *FetchSupplyL return out, nil } +func (c *universeClient) InsertSupplyCommit(ctx context.Context, in *InsertSupplyCommitRequest, opts ...grpc.CallOption) (*InsertSupplyCommitResponse, error) { + out := new(InsertSupplyCommitResponse) + err := c.cc.Invoke(ctx, "/universerpc.Universe/InsertSupplyCommit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // UniverseServer is the server API for Universe service. // All implementations must embed UnimplementedUniverseServer // for forward compatibility @@ -473,6 +487,11 @@ type UniverseServer interface { // within a specified block height range. The leaves include issuance, burn, // and ignore leaves, which represent the supply changes for the asset group. FetchSupplyLeaves(context.Context, *FetchSupplyLeavesRequest) (*FetchSupplyLeavesResponse, error) + // tapcli: `universe supplycommit insert` + // InsertSupplyCommit inserts a supply commitment for a specific asset + // group. This includes the commitment details, supply leaves (issuance, burn, + // and ignore), and chain proof that proves the commitment has been mined. 
+ InsertSupplyCommit(context.Context, *InsertSupplyCommitRequest) (*InsertSupplyCommitResponse, error) mustEmbedUnimplementedUniverseServer() } @@ -549,6 +568,9 @@ func (UnimplementedUniverseServer) FetchSupplyCommit(context.Context, *FetchSupp func (UnimplementedUniverseServer) FetchSupplyLeaves(context.Context, *FetchSupplyLeavesRequest) (*FetchSupplyLeavesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FetchSupplyLeaves not implemented") } +func (UnimplementedUniverseServer) InsertSupplyCommit(context.Context, *InsertSupplyCommitRequest) (*InsertSupplyCommitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method InsertSupplyCommit not implemented") +} func (UnimplementedUniverseServer) mustEmbedUnimplementedUniverseServer() {} // UnsafeUniverseServer may be embedded to opt out of forward compatibility for this service. @@ -976,6 +998,24 @@ func _Universe_FetchSupplyLeaves_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _Universe_InsertSupplyCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InsertSupplyCommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UniverseServer).InsertSupplyCommit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/universerpc.Universe/InsertSupplyCommit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UniverseServer).InsertSupplyCommit(ctx, req.(*InsertSupplyCommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + // Universe_ServiceDesc is the grpc.ServiceDesc for Universe service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1075,6 +1115,10 @@ var Universe_ServiceDesc = grpc.ServiceDesc{ MethodName: "FetchSupplyLeaves", Handler: _Universe_FetchSupplyLeaves_Handler, }, + { + MethodName: "InsertSupplyCommit", + Handler: _Universe_InsertSupplyCommit_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "universerpc/universe.proto", diff --git a/universe/supplycommit/env.go b/universe/supplycommit/env.go index f021930d7..f040bcccb 100644 --- a/universe/supplycommit/env.go +++ b/universe/supplycommit/env.go @@ -1,9 +1,11 @@ package supplycommit import ( + "bytes" "context" "crypto/sha256" "fmt" + "net/url" "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcutil" @@ -24,6 +26,12 @@ import ( "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) +var ( + // ErrNoBlockInfo is returned when a root commitment is expected to have + // block information, but it is missing. + ErrNoBlockInfo = fmt.Errorf("no block info available") +) + const ( // DefaultCommitConfTarget is the default confirmation target used when // crafting the commitment transaction. This is used in fee estimation. @@ -61,6 +69,13 @@ func (s SupplySubTree) String() string { } } +// AllSupplySubTrees contains all possible valid SupplySubTree values. +var AllSupplySubTrees = []SupplySubTree{ + MintTreeType, + BurnTreeType, + IgnoreTreeType, +} + // UniverseKey is the key used to identify the universe in the supply tree. This // is scoped to a root supply tree for a given asset specifier. func (s SupplySubTree) UniverseKey() [32]byte { @@ -102,6 +117,142 @@ type SupplyLeaves struct { IgnoreLeafEntries []NewIgnoreEvent } +// AllUpdates returns a slice of all supply update events contained within +// the SupplyLeaves instance. This includes mints, burns, and ignores. 
+func (s SupplyLeaves) AllUpdates() []SupplyUpdateEvent { + mint := func(e NewMintEvent) SupplyUpdateEvent { + return &e + } + burn := func(e NewBurnEvent) SupplyUpdateEvent { + return &e + } + ignore := func(e NewIgnoreEvent) SupplyUpdateEvent { + return &e + } + allUpdates := make( + []SupplyUpdateEvent, 0, len(s.IssuanceLeafEntries)+ + len(s.BurnLeafEntries)+len(s.IgnoreLeafEntries), + ) + allUpdates = append(allUpdates, fn.Map(s.IssuanceLeafEntries, mint)...) + allUpdates = append(allUpdates, fn.Map(s.BurnLeafEntries, burn)...) + allUpdates = append(allUpdates, fn.Map(s.IgnoreLeafEntries, ignore)...) + + return allUpdates +} + +// ValidateBlockHeights ensures that all supply leaves have a non-zero block +// height. +func (s SupplyLeaves) ValidateBlockHeights() error { + // Block height must be non-zero for all leaves. + for _, leaf := range s.IssuanceLeafEntries { + if leaf.BlockHeight() == 0 { + return fmt.Errorf("mint leaf has zero block height") + } + } + + for _, leaf := range s.BurnLeafEntries { + if leaf.BlockHeight() == 0 { + return fmt.Errorf("burn leaf has zero block height") + } + } + + for _, leaf := range s.IgnoreLeafEntries { + if leaf.BlockHeight() == 0 { + return fmt.Errorf("ignore leaf has zero block height") + } + } + + return nil +} + +// NewSupplyLeavesFromEvents creates a SupplyLeaves instance from a slice of +// SupplyUpdateEvent instances. 
+func NewSupplyLeavesFromEvents(events []SupplyUpdateEvent) (SupplyLeaves, + error) { + + var leaves SupplyLeaves + for idx := range events { + event := events[idx] + + switch e := event.(type) { + case *NewMintEvent: + leaves.IssuanceLeafEntries = append( + leaves.IssuanceLeafEntries, *e, + ) + + case *NewBurnEvent: + leaves.BurnLeafEntries = append( + leaves.BurnLeafEntries, *e, + ) + + case *NewIgnoreEvent: + leaves.IgnoreLeafEntries = append( + leaves.IgnoreLeafEntries, *e, + ) + + default: + return leaves, fmt.Errorf("unknown event type: %T", e) + } + } + + return leaves, nil +} + +// AssetLookup is an interface that allows us to query for asset +// information, such as asset groups and asset metadata. +type AssetLookup interface { + // QueryAssetGroupByGroupKey fetches the asset group with a matching + // tweaked key, including the genesis information used to create the + // group. + QueryAssetGroupByGroupKey(ctx context.Context, + groupKey *btcec.PublicKey) (*asset.AssetGroup, error) + + // FetchAssetMetaForAsset attempts to fetch an asset meta based on an + // asset ID. + FetchAssetMetaForAsset(ctx context.Context, + assetID asset.ID) (*proof.MetaReveal, error) + + // FetchInternalKeyLocator attempts to fetch the key locator information + // for the given raw internal key. If the key cannot be found, then + // ErrInternalKeyNotFound is returned. + FetchInternalKeyLocator(ctx context.Context, + rawKey *btcec.PublicKey) (keychain.KeyLocator, error) +} + +// FetchLatestAssetMetadata returns the latest asset metadata for the +// given asset specifier. +func FetchLatestAssetMetadata(ctx context.Context, lookup AssetLookup, + assetSpec asset.Specifier) (proof.MetaReveal, error) { + + var zero proof.MetaReveal + + groupKey, err := assetSpec.UnwrapGroupKeyOrErr() + if err != nil { + return zero, err + } + + // TODO(ffranr): This currently retrieves asset metadata using the + // genesis ID. 
Update it to retrieve by the latest asset ID instead, + // which will provide access to the most up-to-date canonical universe + // list. + assetGroup, err := lookup.QueryAssetGroupByGroupKey(ctx, groupKey) + if err != nil { + return zero, fmt.Errorf("unable to fetch asset group "+ + "by group key: %w", err) + } + + // Retrieve the asset metadata for the asset group. This will + // include the delegation key and universe commitment flag. + metaReveal, err := lookup.FetchAssetMetaForAsset( + ctx, assetGroup.Genesis.ID(), + ) + if err != nil { + return zero, fmt.Errorf("failed to fetch asset meta: %w", err) + } + + return *metaReveal, nil +} + // SupplyTreeView is an interface that allows the state machine to obtain an up // to date snapshot of the root supply tree, as the sub trees (ignore, burn, // mint) committed in the main supply tree. @@ -112,9 +263,10 @@ type SupplyTreeView interface { FetchSubTree(ctx context.Context, assetSpec asset.Specifier, treeType SupplySubTree) lfn.Result[mssmt.Tree] - // FetchSubTrees returns all the sub trees for the given asset spec. + // FetchSubTrees returns all the subtrees for the given asset spec. FetchSubTrees(ctx context.Context, - assetSpec asset.Specifier) lfn.Result[SupplyTrees] + assetSpec asset.Specifier, + blockHeightEnd fn.Option[uint32]) lfn.Result[SupplyTrees] // FetchRootSupplyTree returns the root supply tree which contains a // commitment to each of the sub trees. @@ -156,10 +308,17 @@ type SupplyTreeView interface { type PreCommitment struct { // TxIn returns the transaction input that corresponds to the pre-commitment. func (p *PreCommitment) TxIn() *wire.TxIn { return &wire.TxIn{ - PreviousOutPoint: wire.OutPoint{ - Hash: p.MintingTxn.TxHash(), - Index: p.OutIdx, - }, + PreviousOutPoint: p.OutPoint(), + } +} + +// OutPoint returns the outpoint that corresponds to the pre-commitment output. // This is the output that is spent by the supply commitment anchoring // transaction.
+func (p *PreCommitment) OutPoint() wire.OutPoint { + return wire.OutPoint{ + Hash: p.MintingTxn.TxHash(), + Index: p.OutIdx, } } @@ -181,6 +340,14 @@ type CommitmentBlock struct { // the block. TxIndex uint32 + // BlockHeader is the block header of the block that contains the + // commitment. + BlockHeader *wire.BlockHeader + + // MerkleProof is the merkle proof that proves that the supply + // commitment transaction is included in the block. + MerkleProof *proof.TxMerkleProof + // ChainFees is the amount in sats paid in on-chain fees for the // supply commitment transaction. ChainFees int64 @@ -212,6 +379,11 @@ type RootCommitment struct { // asset supply. This may be None if the commitment has not yet // been mined. CommitmentBlock fn.Option[CommitmentBlock] + + // SpentCommitment is the outpoint of the previous root commitment that + // this root commitment is spending. This will be None if this is the + // first root commitment for the asset. + SpentCommitment fn.Option[wire.OutPoint] } // TxIn returns the transaction input that corresponds to the root commitment. @@ -270,6 +442,92 @@ func (r *RootCommitment) TapscriptRoot() ([]byte, error) { return computeSupplyCommitTapscriptRoot(supplyRootHash) } +// VerifyChainAnchor checks that the on-chain information is correct. 
+func (r *RootCommitment) VerifyChainAnchor(merkleVerifier proof.MerkleVerifier, + headerVerifier proof.HeaderVerifier) error { + + block, err := r.CommitmentBlock.UnwrapOrErr(ErrNoBlockInfo) + if err != nil { + return fmt.Errorf("unable to verify root commitment: %w", err) + } + + if block.MerkleProof == nil { + return fmt.Errorf("merkle proof is missing") + } + + if block.BlockHeader == nil { + return fmt.Errorf("block header is missing") + } + + if block.Hash != block.BlockHeader.BlockHash() { + return fmt.Errorf("block hash %v does not match block header "+ + "hash %v", block.Hash, block.BlockHeader.BlockHash()) + } + + if r.Txn == nil { + return fmt.Errorf("root commitment transaction is missing") + } + + if r.SupplyRoot == nil { + return fmt.Errorf("supply root is missing") + } + + err = fn.MapOptionZ( + r.SpentCommitment, func(prevOut wire.OutPoint) error { + if !proof.TxSpendsPrevOut(r.Txn, &prevOut) { + return fmt.Errorf("commitment TX doesn't " + + "spend previous commitment outpoint") + } + + return nil + }, + ) + if err != nil { + return fmt.Errorf("unable to verify spent commitment: %w", err) + } + + err = merkleVerifier( + r.Txn, block.MerkleProof, block.BlockHeader.MerkleRoot, + ) + if err != nil { + return fmt.Errorf("unable to verify merkle proof: %w", err) + } + + err = headerVerifier(*block.BlockHeader, block.Height) + if err != nil { + return fmt.Errorf("unable to verify block header: %w", err) + } + + if r.TxOutIdx >= uint32(len(r.Txn.TxOut)) { + return fmt.Errorf("tx out index %d is out of bounds for "+ + "transaction with %d outputs", r.TxOutIdx, + len(r.Txn.TxOut)) + } + + txOut := r.Txn.TxOut[r.TxOutIdx] + expectedOut, _, err := RootCommitTxOut( + r.InternalKey.PubKey, nil, r.SupplyRoot.NodeHash(), + ) + if err != nil { + return fmt.Errorf("unable to create expected output: %w", err) + } + + if txOut.Value != expectedOut.Value { + return fmt.Errorf("tx out value %d does not match expected "+ + "value %d", txOut.Value, expectedOut.Value) + } 
+ + if !bytes.Equal(txOut.PkScript, expectedOut.PkScript) { + return fmt.Errorf("tx out pk script %x does not match "+ + "expected pk script %x", txOut.PkScript, + expectedOut.PkScript) + } + + // Everything that we can check just from the static information + // provided checks out. + return nil +} + // RootCommitTxOut returns the transaction output that corresponds to the root // commitment. This is used to create a new commitment output. func RootCommitTxOut(internalKey *btcec.PublicKey, @@ -444,9 +702,8 @@ type StateMachineStore interface { // error will be returned. // // TODO(roasbeef): also have it return the next event if exists? - FetchState(context.Context, asset.Specifier) ( - State, lfn.Option[SupplyStateTransition], error, - ) + FetchState(context.Context, asset.Specifier) (State, + lfn.Option[SupplyStateTransition], error) // ApplyStateTransition is used to apply a new state transition to the // target state machine. Once the transition has been applied, the state @@ -480,6 +737,25 @@ type StateMachineStore interface { asset.Specifier) ([]SupplyUpdateEvent, error) } +// SupplySyncer is an interface that allows the state machine to insert +// supply commitments into the remote universe server. +type SupplySyncer interface { + // PushSupplyCommitment pushes a supply commitment to the remote + // universe server. This function should block until the sync insertion + // is complete. + // + // Returns a map of per-server errors keyed by server host string and + // an internal error. If all pushes succeed, both return values are nil. + // If some pushes fail, the map contains only the failed servers and + // their corresponding errors. If there's an internal/system error that + // prevents the operation from proceeding, it's returned as the second + // value. 
+ PushSupplyCommitment(ctx context.Context, assetSpec asset.Specifier, + commitment RootCommitment, updateLeaves SupplyLeaves, + chainProof ChainProof, + canonicalUniverses []url.URL) (map[string]error, error) +} + // Environment is a set of dependencies that a state machine may need to carry // out the logic for a given state transition. All fields are to be considered // immutable, and will be fixed for the lifetime of the state machine. @@ -500,6 +776,10 @@ type Environment struct { // Wallet is the main wallet interface used to managed PSBT packets. Wallet Wallet + // AssetLookup is used to look up asset information such as asset groups + // and asset metadata. + AssetLookup AssetLookup + // KeyRing is the main key ring interface used to manage keys. KeyRing KeyRing @@ -508,6 +788,10 @@ type Environment struct { // TODO(roasbeef): can make a slimmer version of Chain tapgarden.ChainBridge + // SupplySyncer is used to insert supply commitments into the remote + // universe server. + SupplySyncer SupplySyncer + // StateLog is the main state log that is used to track the state of the // state machine. This is used to persist the state of the state machine // across restarts. 
diff --git a/universe/supplycommit/multi_sm_manager.go b/universe/supplycommit/manager.go similarity index 75% rename from universe/supplycommit/multi_sm_manager.go rename to universe/supplycommit/manager.go index 1597a9813..c3859091d 100644 --- a/universe/supplycommit/multi_sm_manager.go +++ b/universe/supplycommit/manager.go @@ -2,15 +2,18 @@ package supplycommit import ( "context" + "errors" "fmt" "sync" "time" "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/chaincfg" + "github.com/lightninglabs/taproot-assets/address" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/mssmt" + "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapgarden" "github.com/lightninglabs/taproot-assets/universe" "github.com/lightningnetwork/lnd/msgmux" @@ -34,10 +37,10 @@ type DaemonAdapters interface { Stop() error } -// MultiStateMachineManagerCfg is the configuration for the -// MultiStateMachineManager. It contains all the dependencies needed to +// ManagerCfg is the configuration for the +// Manager. It contains all the dependencies needed to // manage multiple supply commitment state machines, one for each asset group. -type MultiStateMachineManagerCfg struct { +type ManagerCfg struct { // TreeView is the interface that allows the state machine to obtain an // up to date snapshot of the root supply tree, and the relevant set of // subtrees. @@ -50,6 +53,10 @@ type MultiStateMachineManagerCfg struct { // Wallet is the interface used interact with the wallet. Wallet Wallet + // AssetLookup is used to look up asset information such as asset groups + // and asset metadata. + AssetLookup AssetLookup + // KeyRing is the key ring used to derive new keys. 
KeyRing KeyRing @@ -58,6 +65,10 @@ type MultiStateMachineManagerCfg struct { // TODO(roasbeef): can make a slimmer version of Chain tapgarden.ChainBridge + // SupplySyncer is used to insert supply commitments into the remote + // universe server. + SupplySyncer SupplySyncer + // DaemonAdapters is a set of adapters that allow the state machine to // interact with external daemons whilst processing internal events. DaemonAdapters DaemonAdapters @@ -76,12 +87,12 @@ type MultiStateMachineManagerCfg struct { IgnoreCheckerCache IgnoreCheckerCache } -// MultiStateMachineManager is a manager for multiple supply commitment state +// Manager is a manager for multiple supply commitment state // machines, one for each asset group. It is responsible for starting and // stopping the state machines, as well as forwarding sending events to them. -type MultiStateMachineManager struct { +type Manager struct { // cfg is the configuration for the multi state machine manager. - cfg MultiStateMachineManagerCfg + cfg ManagerCfg // smCache is a cache that maps asset group public keys to their // supply commitment state machines. @@ -95,11 +106,9 @@ type MultiStateMachineManager struct { stopOnce sync.Once } -// NewMultiStateMachineManager creates a new multi state machine manager. -func NewMultiStateMachineManager( - cfg MultiStateMachineManagerCfg) *MultiStateMachineManager { - - return &MultiStateMachineManager{ +// NewManager creates a new multi state machine manager. +func NewManager(cfg ManagerCfg) *Manager { + return &Manager{ cfg: cfg, ContextGuard: &fn.ContextGuard{ DefaultTimeout: DefaultTimeout, @@ -109,7 +118,7 @@ func NewMultiStateMachineManager( } // Start starts the multi state machine manager. -func (m *MultiStateMachineManager) Start() error { +func (m *Manager) Start() error { m.startOnce.Do(func() { // Initialize the state machine cache. 
m.smCache = newStateMachineCache() @@ -120,7 +129,7 @@ func (m *MultiStateMachineManager) Start() error { // Stop stops the multi state machine manager, which in turn stops all asset // group key specific supply commitment state machines. -func (m *MultiStateMachineManager) Stop() error { +func (m *Manager) Stop() error { m.stopOnce.Do(func() { // Cancel the state machine context to signal all state machines // to stop. @@ -133,10 +142,59 @@ func (m *MultiStateMachineManager) Stop() error { return nil } +// ensureSupplyCommitSupport verifies that the asset group for the given +// asset specifier supports supply commitments, and that this node can generate +// supply commitments for it. +func (m *Manager) ensureSupplyCommitSupport(ctx context.Context, + metaReveal proof.MetaReveal) error { + + // If the universe commitment flag is not set on the asset metadata, + // then the asset group does not support supply commitments. + if !metaReveal.UniverseCommitments { + return fmt.Errorf("asset group metadata universe " + + "commitments flag indicates that asset does not " + + "support supply commitments") + } + + // If a delegation key is not present, then the asset group does not + // support supply commitments. + if metaReveal.DelegationKey.IsNone() { + return fmt.Errorf("asset group metadata does not " + + "specify delegation key, which is required for " + + "supply commitments") + } + + // Extract supply commitment delegation pub key from the asset metadata. + delegationPubKey, err := metaReveal.DelegationKey.UnwrapOrErr( + fmt.Errorf("delegation key not found for given asset"), + ) + if err != nil { + return err + } + + // Fetch the delegation key locator. We need to ensure that the + // delegation key is owned by this node, so that we can generate + // supply commitments (ignore tuples) for this asset group. 
+ _, err = m.cfg.AssetLookup.FetchInternalKeyLocator( + ctx, &delegationPubKey, + ) + switch { + case errors.Is(err, address.ErrInternalKeyNotFound): + return fmt.Errorf("delegation key locator not found; "+ + "only delegation key owners can ignore asset "+ + "outpoints for this asset group: %w", err) + case err != nil: + return fmt.Errorf("failed to fetch delegation key locator: %w", + err) + } + + return nil +} + // fetchStateMachine retrieves a state machine from the cache or creates a // new one if it doesn't exist. If a new state machine is created, it is also // started. -func (m *MultiStateMachineManager) fetchStateMachine( +func (m *Manager) fetchStateMachine( assetSpec asset.Specifier) (*StateMachine, error) { groupKey, err := assetSpec.UnwrapGroupKeyOrErr() @@ -153,13 +211,35 @@ func (m *MultiStateMachineManager) fetchStateMachine( } // If the state machine is not found, create a new one. + // + // Before we can create a state machine, we need to ensure that the + // asset group supports supply commitments. If it doesn't, then we + // return an error. 
+ ctx, cancel := m.WithCtxQuitNoTimeout() + defer cancel() + + metaReveal, err := FetchLatestAssetMetadata( + ctx, m.cfg.AssetLookup, assetSpec, + ) + if err != nil { + return nil, fmt.Errorf("failed to fetch asset meta: %w", err) + } + + err = m.ensureSupplyCommitSupport(ctx, metaReveal) + if err != nil { + return nil, fmt.Errorf("failed to ensure supply commit "+ + "support for asset: %w", err) + } + env := &Environment{ AssetSpec: assetSpec, TreeView: m.cfg.TreeView, Commitments: m.cfg.Commitments, Wallet: m.cfg.Wallet, + AssetLookup: m.cfg.AssetLookup, KeyRing: m.cfg.KeyRing, Chain: m.cfg.Chain, + SupplySyncer: m.cfg.SupplySyncer, StateLog: m.cfg.StateLog, CommitConfTarget: DefaultCommitConfTarget, ChainParams: m.cfg.ChainParams, @@ -168,9 +248,6 @@ func NewMultiStateMachineManager( // Before we start the state machine, we'll need to fetch the current // state from disk, to see if we need to emit any new events. - ctx, cancel := m.WithCtxQuitNoTimeout() - defer cancel() - initialState, _, err := m.cfg.StateLog.FetchState(ctx, assetSpec) if err != nil { return nil, fmt.Errorf("unable to fetch current state: %w", err) } @@ -218,13 +295,13 @@ func (m *MultiStateMachineManager) fetchStateMachine( // SendEvent sends an event to the state machine associated with the given asset // specifier. If a state machine for the asset group does not exist, it will be // created and started. -func (m *MultiStateMachineManager) SendEvent(ctx context.Context, +func (m *Manager) SendEvent(ctx context.Context, assetSpec asset.Specifier, event Event) error { sm, err := m.fetchStateMachine(assetSpec) if err != nil { - return fmt.Errorf("unable to get or create state "+ - "machine: %w", err) + return fmt.Errorf("unable to get or create state machine: %w", + err) } sm.SendEvent(ctx, event) @@ -236,8 +313,8 @@ func (m *MultiStateMachineManager) SendEvent(ctx context.Context, // that the event has been durably persisted.
If the event doesn't support // synchronous processing (i.e., it's not a SupplyUpdateEvent), this method will // return an error. -func (m *MultiStateMachineManager) SendEventSync(ctx context.Context, - assetSpec asset.Specifier, event SyncSupplyUpdateEvent) error { +func (m *Manager) SendEventSync(ctx context.Context, assetSpec asset.Specifier, + event SyncSupplyUpdateEvent) error { // Only SupplyUpdateEvents can be processed synchronously. supplyEvent, ok := event.(SupplyUpdateEvent) @@ -274,9 +351,9 @@ func (m *MultiStateMachineManager) SendEventSync(ctx context.Context, // SendMintEvent sends a mint event to the supply commitment state machine. // // NOTE: This implements the tapgarden.MintSupplyCommitter interface. -func (m *MultiStateMachineManager) SendMintEvent(ctx context.Context, - assetSpec asset.Specifier, leafKey universe.UniqueLeafKey, - issuanceProof universe.Leaf, mintBlockHeight uint32) error { +func (m *Manager) SendMintEvent(ctx context.Context, assetSpec asset.Specifier, + leafKey universe.UniqueLeafKey, issuanceProof universe.Leaf, + mintBlockHeight uint32) error { mintEvent := &NewMintEvent{ LeafKey: leafKey, @@ -290,8 +367,8 @@ func (m *MultiStateMachineManager) SendMintEvent(ctx context.Context, // SendBurnEvent sends a burn event to the supply commitment state machine. // // NOTE: This implements the tapfreighter.BurnSupplyCommitter interface. -func (m *MultiStateMachineManager) SendBurnEvent(ctx context.Context, - assetSpec asset.Specifier, burnLeaf universe.BurnLeaf) error { +func (m *Manager) SendBurnEvent(ctx context.Context, assetSpec asset.Specifier, + burnLeaf universe.BurnLeaf) error { burnEvent := &NewBurnEvent{ BurnLeaf: burnLeaf, @@ -300,10 +377,25 @@ func (m *MultiStateMachineManager) SendBurnEvent(ctx context.Context, return m.SendEventSync(ctx, assetSpec, burnEvent) } +// StartSupplyPublishFlow triggers the state machine to build and publish +// a new supply commitment if pending supply tree updates exist. 
+func (m *Manager) StartSupplyPublishFlow(ctx context.Context, + assetSpec asset.Specifier) error { + + sm, err := m.fetchStateMachine(assetSpec) + if err != nil { + return fmt.Errorf("unable to get or create state machine: %w", + err) + } + + sm.SendEvent(ctx, &CommitTickEvent{}) + return nil +} + // CanHandle determines if the state machine associated with the given asset // specifier can handle the given message. If a state machine for the asset // group does not exist, it will be created and started. -func (m *MultiStateMachineManager) CanHandle(assetSpec asset.Specifier, +func (m *Manager) CanHandle(assetSpec asset.Specifier, msg msgmux.PeerMsg) (bool, error) { sm, err := m.fetchStateMachine(assetSpec) @@ -318,7 +410,7 @@ func (m *MultiStateMachineManager) CanHandle(assetSpec asset.Specifier, // Name returns the name of the state machine associated with the given asset // specifier. If a state machine for the asset group does not exist, it will be // created and started. -func (m *MultiStateMachineManager) Name( +func (m *Manager) Name( assetSpec asset.Specifier) (string, error) { sm, err := m.fetchStateMachine(assetSpec) @@ -333,7 +425,7 @@ func (m *MultiStateMachineManager) Name( // SendMessage sends a message to the state machine associated with the given // asset specifier. If a state machine for the asset group does not exist, it // will be created and started. -func (m *MultiStateMachineManager) SendMessage(ctx context.Context, +func (m *Manager) SendMessage(ctx context.Context, assetSpec asset.Specifier, msg msgmux.PeerMsg) (bool, error) { sm, err := m.fetchStateMachine(assetSpec) @@ -348,7 +440,7 @@ func (m *MultiStateMachineManager) SendMessage(ctx context.Context, // CurrentState returns the current state of the state machine associated with // the given asset specifier. If a state machine for the asset group does not // exist, it will be created and started. 
-func (m *MultiStateMachineManager) CurrentState(assetSpec asset.Specifier) ( +func (m *Manager) CurrentState(assetSpec asset.Specifier) ( protofsm.State[Event, *Environment], error) { sm, err := m.fetchStateMachine(assetSpec) @@ -363,7 +455,7 @@ func (m *MultiStateMachineManager) CurrentState(assetSpec asset.Specifier) ( // RegisterStateEvents registers a state event subscriber with the state machine // associated with the given asset specifier. If a state machine for the asset // group does not exist, it will be created and started. -func (m *MultiStateMachineManager) RegisterStateEvents( +func (m *Manager) RegisterStateEvents( assetSpec asset.Specifier) (StateSub, error) { sm, err := m.fetchStateMachine(assetSpec) @@ -378,7 +470,7 @@ func (m *MultiStateMachineManager) RegisterStateEvents( // RemoveStateSub removes a state event subscriber from the state machine // associated with the given asset specifier. If a state machine for the asset // group does not exist, it will be created and started. -func (m *MultiStateMachineManager) RemoveStateSub(assetSpec asset.Specifier, +func (m *Manager) RemoveStateSub(assetSpec asset.Specifier, sub StateSub) error { sm, err := m.fetchStateMachine(assetSpec) @@ -405,56 +497,9 @@ type FetchCommitmentResp struct { ChainCommitment RootCommitment } -// FetchCommitment fetches the supply commitment for the given asset specifier. -func (m *MultiStateMachineManager) FetchCommitment(ctx context.Context, - assetSpec asset.Specifier) (fn.Option[FetchCommitmentResp], error) { - - var zero fn.Option[FetchCommitmentResp] - - chainCommitOpt, err := m.cfg.Commitments.SupplyCommit( - ctx, assetSpec, - ).Unpack() - if err != nil { - return zero, fmt.Errorf("unable to fetch supply commit: %w", - err) - } - - if chainCommitOpt.IsNone() { - // If the chain commitment is not present, we return an empty - // response. 
- return zero, nil - } - chainCommit, err := chainCommitOpt.UnwrapOrErr( - fmt.Errorf("unable to fetch supply commit: %w", err), - ) - if err != nil { - return zero, err - } - - supplyTree, err := m.cfg.TreeView.FetchRootSupplyTree( - ctx, assetSpec, - ).Unpack() - if err != nil { - return zero, fmt.Errorf("unable to fetch supply commit root "+ - "supply tree: %w", err) - } - - subtrees, err := m.cfg.TreeView.FetchSubTrees(ctx, assetSpec).Unpack() - if err != nil { - return zero, fmt.Errorf("unable to fetch supply commit sub "+ - "trees: %w", err) - } - - return fn.Some(FetchCommitmentResp{ - SupplyTree: supplyTree, - Subtrees: subtrees, - ChainCommitment: chainCommit, - }), nil -} - // FetchSupplyLeavesByHeight returns the set of supply leaves for the given // asset specifier within the specified height range. -func (m *MultiStateMachineManager) FetchSupplyLeavesByHeight( +func (m *Manager) FetchSupplyLeavesByHeight( ctx context.Context, assetSpec asset.Specifier, startHeight, endHeight uint32) (SupplyLeaves, error) { @@ -471,6 +516,23 @@ func (m *MultiStateMachineManager) FetchSupplyLeavesByHeight( return resp, nil } +// FetchSubTrees returns all the sub trees for the given asset specifier. +func (m *Manager) FetchSubTrees(ctx context.Context, + assetSpec asset.Specifier, + blockHeightEnd fn.Option[uint32]) (SupplyTrees, error) { + + var zero SupplyTrees + + subtrees, err := m.cfg.TreeView.FetchSubTrees( + ctx, assetSpec, blockHeightEnd, + ).Unpack() + if err != nil { + return zero, fmt.Errorf("unable to fetch sub trees: %w", err) + } + + return subtrees, nil +} + // stateMachineCache is a thread-safe cache mapping an asset group's public key // to its supply commitment state machine. 
type stateMachineCache struct { diff --git a/universe/supplycommit/mock.go b/universe/supplycommit/mock.go index 8fea33c5d..13c9f2e2a 100644 --- a/universe/supplycommit/mock.go +++ b/universe/supplycommit/mock.go @@ -2,6 +2,7 @@ package supplycommit import ( "context" + "net/url" "sync" "github.com/btcsuite/btcd/btcec/v2" @@ -10,6 +11,7 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/mssmt" "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapsend" @@ -35,7 +37,8 @@ func (m *mockSupplyTreeView) FetchSubTree(_ context.Context, } func (m *mockSupplyTreeView) FetchSubTrees(_ context.Context, - assetSpec asset.Specifier) lfn.Result[SupplyTrees] { + assetSpec asset.Specifier, + blockHeightEnd fn.Option[uint32]) lfn.Result[SupplyTrees] { args := m.Called(assetSpec) return args.Get(0).(lfn.Result[SupplyTrees]) @@ -427,3 +430,57 @@ type mockIgnoreCheckerCache struct { func (c *mockIgnoreCheckerCache) InvalidateCache(groupKey btcec.PublicKey) { c.Called(groupKey) } + +// mockAssetLookup is a mock implementation of the AssetLookup interface. 
+type mockAssetLookup struct { + mock.Mock +} + +func (m *mockAssetLookup) QueryAssetGroupByGroupKey(ctx context.Context, + groupKey *btcec.PublicKey) (*asset.AssetGroup, error) { + + args := m.Called(ctx, groupKey) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*asset.AssetGroup), args.Error(1) +} + +func (m *mockAssetLookup) FetchAssetMetaForAsset(ctx context.Context, + assetID asset.ID) (*proof.MetaReveal, error) { + + args := m.Called(ctx, assetID) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*proof.MetaReveal), args.Error(1) +} + +func (m *mockAssetLookup) FetchInternalKeyLocator(ctx context.Context, + rawKey *btcec.PublicKey) (keychain.KeyLocator, error) { + + args := m.Called(ctx, rawKey) + return args.Get(0).(keychain.KeyLocator), args.Error(1) +} + +// mockSupplySyncer is a mock implementation of the SupplySyncer interface. +type mockSupplySyncer struct { + mock.Mock +} + +func (m *mockSupplySyncer) PushSupplyCommitment(ctx context.Context, + assetSpec asset.Specifier, commitment RootCommitment, + updateLeaves SupplyLeaves, chainProof ChainProof, + canonicalUniverses []url.URL) (map[string]error, error) { + + args := m.Called(ctx, assetSpec, commitment, updateLeaves, chainProof, + canonicalUniverses) + + // Handle both nil and map[string]error return types. 
+ var errorMap map[string]error + if args.Get(0) != nil { + errorMap = args.Get(0).(map[string]error) + } + + return errorMap, args.Error(1) +} diff --git a/universe/supplycommit/state_machine_test.go b/universe/supplycommit/state_machine_test.go index 7a63c9030..73e4ec83e 100644 --- a/universe/supplycommit/state_machine_test.go +++ b/universe/supplycommit/state_machine_test.go @@ -14,6 +14,7 @@ import ( "github.com/btcsuite/btcd/btcutil/psbt" "github.com/btcsuite/btcd/btcutil/txsort" "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/internal/test" @@ -119,15 +120,17 @@ type supplyCommitTestHarness struct { stateMachine *StateMachine env *Environment - mockTreeView *mockSupplyTreeView - mockCommits *mockCommitmentTracker - mockWallet *mockWallet - mockKeyRing *mockKeyRing - mockChain *mockChainBridge - mockStateLog *mockStateMachineStore - mockCache *mockIgnoreCheckerCache - mockDaemon *mockDaemonAdapters - mockErrReporter *mockErrorReporter + mockTreeView *mockSupplyTreeView + mockCommits *mockCommitmentTracker + mockWallet *mockWallet + mockKeyRing *mockKeyRing + mockChain *mockChainBridge + mockStateLog *mockStateMachineStore + mockCache *mockIgnoreCheckerCache + mockDaemon *mockDaemonAdapters + mockErrReporter *mockErrorReporter + mockAssetLookup *mockAssetLookup + mockSupplySyncer *mockSupplySyncer stateSub protofsm.StateSubscriber[Event, *Environment] } @@ -144,6 +147,8 @@ func newSupplyCommitTestHarness(t *testing.T, mockDaemon := newMockDaemonAdapters() mockErrReporter := &mockErrorReporter{} mockCache := &mockIgnoreCheckerCache{} + mockAssetLookup := &mockAssetLookup{} + mockSupplySyncer := &mockSupplySyncer{} env := &Environment{ AssetSpec: cfg.assetSpec, @@ -153,6 +158,8 @@ func newSupplyCommitTestHarness(t *testing.T, KeyRing: mockKey, Chain: mockChain, StateLog: mockStateLog, + AssetLookup: 
mockAssetLookup, + SupplySyncer: mockSupplySyncer, CommitConfTarget: DefaultCommitConfTarget, IgnoreCheckerCache: mockCache, } @@ -170,19 +177,21 @@ func newSupplyCommitTestHarness(t *testing.T, stateMachine := protofsm.NewStateMachine(fsmCfg) h := &supplyCommitTestHarness{ - t: t, - cfg: cfg, - stateMachine: &stateMachine, - env: env, - mockTreeView: mockTreeView, - mockCommits: mockCommits, - mockWallet: mockWallet, - mockKeyRing: mockKey, - mockChain: mockChain, - mockStateLog: mockStateLog, - mockCache: mockCache, - mockDaemon: mockDaemon, - mockErrReporter: mockErrReporter, + t: t, + cfg: cfg, + stateMachine: &stateMachine, + env: env, + mockTreeView: mockTreeView, + mockCommits: mockCommits, + mockWallet: mockWallet, + mockKeyRing: mockKey, + mockChain: mockChain, + mockStateLog: mockStateLog, + mockCache: mockCache, + mockDaemon: mockDaemon, + mockErrReporter: mockErrReporter, + mockAssetLookup: mockAssetLookup, + mockSupplySyncer: mockSupplySyncer, } h.stateSub = stateMachine.RegisterStateEvents() @@ -288,6 +297,8 @@ func (h *supplyCommitTestHarness) expectFullCommitmentCycleMocks( h.expectPsbtFunding() h.expectPsbtSigning() h.expectInsertSignedCommitTx() + h.expectAssetLookup() + h.expectSupplySyncer() h.expectBroadcastAndConfRegistration() } @@ -523,6 +534,48 @@ func (h *supplyCommitTestHarness) expectIgnoreCheckerCacheInvalidation() { h.mockCache.On("InvalidateCache", mock.Anything).Return() } +// expectAssetLookup sets up the mock expectations for AssetLookup calls. 
+func (h *supplyCommitTestHarness) expectAssetLookup() { + h.t.Helper() + + // Mock the asset group lookup + dummyAssetGroup := &asset.AssetGroup{ + Genesis: &asset.Genesis{ + FirstPrevOut: wire.OutPoint{ + Hash: chainhash.Hash{}, + Index: 0, + }, + Tag: "test-asset", + OutputIndex: 0, + Type: asset.Normal, + }, + } + + h.mockAssetLookup.On( + "QueryAssetGroupByGroupKey", mock.Anything, mock.Anything, + ).Return(dummyAssetGroup, nil).Maybe() + + // Mock the asset metadata lookup + dummyMetaReveal := &proof.MetaReveal{ + Data: []byte("test-metadata"), + Type: proof.MetaOpaque, + } + + h.mockAssetLookup.On( + "FetchAssetMetaForAsset", mock.Anything, mock.Anything, + ).Return(dummyMetaReveal, nil).Maybe() +} + +// expectSupplySyncer sets up the mock expectations for SupplySyncer calls. +func (h *supplyCommitTestHarness) expectSupplySyncer() { + h.t.Helper() + + h.mockSupplySyncer.On( + "PushSupplyCommitment", mock.Anything, mock.Anything, + mock.Anything, mock.Anything, mock.Anything, mock.Anything, + ).Return(nil, nil).Maybe() +} + // expectFreezePendingTransition sets up the mock expectation for the // FreezePendingTransition call. 
func (h *supplyCommitTestHarness) expectFreezePendingTransition() { @@ -609,7 +662,10 @@ func TestSupplyCommitUpdatesPendingStateTransitions(t *testing.T) { t.Parallel() testScriptKey := test.RandPubKey(t) - defaultAssetSpec := asset.NewSpecifierFromId(testAssetID) + randGroupKey := test.RandPubKey(t) + defaultAssetSpec := asset.NewSpecifierOptionalGroupPubKey( + testAssetID, randGroupKey, + ) initialMintEvent := newTestMintEvent(t, testScriptKey, randOutPoint(t)) // Verify that when the UpdatesPendingState receives a @@ -723,7 +779,10 @@ func TestSupplyCommitUpdatesPendingStateTransitions(t *testing.T) { func TestSupplyCommitTreeCreateStateTransitions(t *testing.T) { t.Parallel() - defaultAssetSpec := asset.NewSpecifierFromId(testAssetID) + randGroupKey := test.RandPubKey(t) + defaultAssetSpec := asset.NewSpecifierOptionalGroupPubKey( + testAssetID, randGroupKey, + ) mintEvent := newTestMintEvent(t, test.RandPubKey(t), randOutPoint(t)) // Verify that a CommitTickEvent received by the CommitTreeCreateState @@ -815,7 +874,11 @@ func TestSupplyCommitTreeCreateStateTransitions(t *testing.T) { func TestSupplyCommitTxCreateStateTransitions(t *testing.T) { t.Parallel() - defaultAssetSpec := asset.NewSpecifierFromId(testAssetID) + randGroupKey := test.RandPubKey(t) + defaultAssetSpec := asset.NewSpecifierOptionalGroupPubKey( + testAssetID, randGroupKey, + ) + initialTransition := SupplyStateTransition{ NewCommitment: RootCommitment{ SupplyRoot: mssmt.NewBranch( @@ -823,6 +886,7 @@ func TestSupplyCommitTxCreateStateTransitions(t *testing.T) { mssmt.NewLeafNode([]byte("right"), 0), ), }, + ChainProof: lfn.Some(ChainProof{}), } // Verify that a CreateTxEvent received by the CommitTxCreateState leads @@ -899,7 +963,11 @@ func TestSupplyCommitTxCreateStateTransitions(t *testing.T) { func TestSupplyCommitTxSignStateTransitions(t *testing.T) { t.Parallel() - defaultAssetSpec := asset.NewSpecifierFromId(testAssetID) + randGroupKey := test.RandPubKey(t) + defaultAssetSpec := 
asset.NewSpecifierOptionalGroupPubKey( + testAssetID, randGroupKey, + ) + dummyTx := wire.NewMsgTx(2) dummyTx.AddTxOut(&wire.TxOut{PkScript: []byte("test"), Value: 1}) @@ -911,6 +979,7 @@ func TestSupplyCommitTxSignStateTransitions(t *testing.T) { InternalKey: internalKey, TxOutIdx: 0, }, + ChainProof: lfn.Some(ChainProof{}), } // This test verifies that a SignTxEvent received by the @@ -1004,6 +1073,7 @@ func TestSupplyCommitBroadcastStateTransitions(t *testing.T) { mssmt.NewLeafNode([]byte("R"), 0), ), }, + ChainProof: lfn.Some(ChainProof{}), } // This test verifies that a BroadcastEvent received by the @@ -1021,6 +1091,8 @@ func TestSupplyCommitBroadcastStateTransitions(t *testing.T) { signedPsbt := newTestSignedPsbt(t, dummyTx) + h.expectAssetLookup() + h.expectSupplySyncer() h.expectBroadcastAndConfRegistration() broadcastEvent := &BroadcastEvent{ @@ -1046,6 +1118,8 @@ func TestSupplyCommitBroadcastStateTransitions(t *testing.T) { h.start() defer h.stopAndAssert() + h.expectAssetLookup() + h.expectSupplySyncer() h.expectCommitState() h.expectApplyStateTransition() @@ -1089,6 +1163,8 @@ func TestSupplyCommitBroadcastStateTransitions(t *testing.T) { h.start() defer h.stopAndAssert() + h.expectAssetLookup() + h.expectSupplySyncer() h.expectApplyStateTransition() // Mock the binding of dangling updates to return a new set of @@ -1186,6 +1262,7 @@ func TestSupplyCommitFinalizeStateTransitions(t *testing.T) { mssmt.NewLeafNode([]byte("leaf"), 0), ), }, + ChainProof: lfn.Some(ChainProof{}), } // This test verifies that a FinalizeEvent received by the @@ -1201,6 +1278,8 @@ func TestSupplyCommitFinalizeStateTransitions(t *testing.T) { h.start() defer h.stopAndAssert() + h.expectAssetLookup() + h.expectSupplySyncer() h.expectApplyStateTransition() h.expectBindDanglingUpdatesWithEvents([]SupplyUpdateEvent{}) h.expectIgnoreCheckerCacheInvalidation() @@ -1253,9 +1332,6 @@ func TestSupplyCommitFinalizeStateTransitions(t *testing.T) { 
t.Run("finalize_with_asset_id_specifier", func(t *testing.T) { assetIDSpec := asset.NewSpecifierFromId(testAssetID) - expectedErr := errors.New("group key must be specified for " + - "supply tree: unable to unwrap asset group public key") - h := newSupplyCommitTestHarness(t, &harnessCfg{ initialState: &CommitFinalizeState{ SupplyTransition: initialTransition, @@ -1265,7 +1341,8 @@ func TestSupplyCommitFinalizeStateTransitions(t *testing.T) { h.start() defer h.stopAndAssert() - h.expectApplyStateTransition() + expectedErr := errors.New("unable to fetch latest asset " + + "metadata: unable to unwrap asset group public key") h.expectFailure(expectedErr) finalizeEvent := &FinalizeEvent{} @@ -1750,7 +1827,9 @@ func TestDanglingUpdatesFullCycle(t *testing.T) { defer h.stopAndAssert() // Freeze the pending transition when we start the commit cycle, and set - // up the mocks that we need.. + // up the mocks that we need. + h.expectAssetLookup() + h.expectSupplySyncer() h.expectFreezePendingTransition() h.expectFullCommitmentCycleMocks(true) @@ -1890,6 +1969,8 @@ func TestDanglingUpdatesAcrossStates(t *testing.T) { &CommitBroadcastState{}, ) + h.expectAssetLookup() + h.expectSupplySyncer() h.expectCommitState() h.expectApplyStateTransition() diff --git a/universe/supplycommit/transitions.go b/universe/supplycommit/transitions.go index 70f55d4b2..c9b6684f9 100644 --- a/universe/supplycommit/transitions.go +++ b/universe/supplycommit/transitions.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "net/url" "github.com/btcsuite/btcd/btcutil/psbt" "github.com/btcsuite/btcd/chaincfg" @@ -207,23 +208,33 @@ func insertIntoTree(tree mssmt.Tree, leafKey [32]byte, return tree.Insert(ctx, leafKey, leafValue) } -// applyTreeUpdates takes the set of pending updates, and applies them to the +// ApplyTreeUpdates takes the set of pending updates, and applies them to the // given supply trees. It returns a new map containing the updated trees. 
-func applyTreeUpdates(supplyTrees SupplyTrees, +func ApplyTreeUpdates(supplyTrees SupplyTrees, pendingUpdates []SupplyUpdateEvent) (SupplyTrees, error) { ctx := context.Background() // Create a copy of the input map to avoid mutating the original. updatedSupplyTrees := make(SupplyTrees) - for k, v := range supplyTrees { - // Create a new tree for each entry in the map. + + // To ensure consistency, we'll create a new empty tree for any subtree + // types that don't exist in the given subtree map. + for _, subtreeType := range AllSupplySubTrees { + subtree, exists := supplyTrees[subtreeType] + if !exists { + updatedSupplyTrees[subtreeType] = + mssmt.NewCompactedTree(mssmt.NewDefaultStore()) + continue + } + + // Copy existing subtree to the new map. newTree := mssmt.NewCompactedTree(mssmt.NewDefaultStore()) - if err := v.Copy(ctx, newTree); err != nil { + if err := subtree.Copy(ctx, newTree); err != nil { return nil, fmt.Errorf("unable to copy tree: %w", err) } - updatedSupplyTrees[k] = newTree + updatedSupplyTrees[subtreeType] = newTree } // TODO(roasbeef): make new copy routine, passes in tree to copy into @@ -262,6 +273,44 @@ func applyTreeUpdates(supplyTrees SupplyTrees, return updatedSupplyTrees, nil } +// UpdateRootSupplyTree takes the given root supply tree, and updates it with +// the set of subtrees. It returns a new tree instance with the updated values. +func UpdateRootSupplyTree(ctx context.Context, rootTree mssmt.Tree, + subTrees SupplyTrees) (mssmt.Tree, error) { + + updatedRoot := rootTree + + // Now we'll insert/update each of the read subtrees into the root + // supply tree. 
+ for treeType, subTree := range subTrees { + subTreeRoot, err := subTree.Root(ctx) + if err != nil { + return nil, fmt.Errorf("unable to fetch "+ + "sub-tree root: %w", err) + } + + if subTreeRoot.NodeSum() == 0 { + continue + } + + rootTreeLeaf := mssmt.NewLeafNode( + lnutils.ByteSlice(subTreeRoot.NodeHash()), + subTreeRoot.NodeSum(), + ) + + rootTreeKey := treeType.UniverseKey() + updatedRoot, err = insertIntoTree( + updatedRoot, rootTreeKey, rootTreeLeaf, + ) + if err != nil { + return nil, fmt.Errorf("unable to insert "+ + "sub-tree into root supply tree: %w", err) + } + } + + return updatedRoot, nil +} + // ProcessEvent processes incoming events for the CommitTreeCreateState. From // this state, we'll take the set of pending changes, then create/read the // components of the sub-supply trees, then use that to create the new finalized @@ -323,7 +372,7 @@ func (c *CommitTreeCreateState) ProcessEvent(event Event, // // TODO(roasbeef): sanity check on population of map? oldSupplyTrees, err := env.TreeView.FetchSubTrees( - ctx, env.AssetSpec, + ctx, env.AssetSpec, fn.None[uint32](), ).Unpack() if err != nil { return nil, fmt.Errorf("unable to fetch old sub "+ @@ -332,7 +381,7 @@ func (c *CommitTreeCreateState) ProcessEvent(event Event, // Next, based on the type of event, we'll create a new key+leaf // to insert into the respective sub-tree. - newSupplyTrees, err := applyTreeUpdates( + newSupplyTrees, err := ApplyTreeUpdates( oldSupplyTrees, pendingUpdates, ) if err != nil { @@ -351,33 +400,12 @@ func (c *CommitTreeCreateState) ProcessEvent(event Event, "supply tree: %w", err) } - // Now we'll insert/update each of the read sub-trees into the - // root supply tree. 
- for treeType, subTree := range newSupplyTrees { - subTreeRoot, err := subTree.Root(ctx) - if err != nil { - return nil, fmt.Errorf("unable to fetch "+ - "sub-tree root: %w", err) - } - - if subTreeRoot.NodeSum() == 0 { - continue - } - - rootTreeLeaf := mssmt.NewLeafNode( - lnutils.ByteSlice(subTreeRoot.NodeHash()), - subTreeRoot.NodeSum(), - ) - - rootTreeKey := treeType.UniverseKey() - rootSupplyTree, err = insertIntoTree( - rootSupplyTree, rootTreeKey, rootTreeLeaf, - ) - if err != nil { - return nil, fmt.Errorf("unable to insert "+ - "sub-tree into root supply tree: %w", - err) - } + rootSupplyTree, err = UpdateRootSupplyTree( + ctx, rootSupplyTree, newSupplyTrees, + ) + if err != nil { + return nil, fmt.Errorf("unable to update root "+ + "supply tree: %w", err) } // Construct the state transition object. We'll begin to @@ -464,6 +492,7 @@ func newRootCommitment(ctx context.Context, // as an input to the new transaction. Pre-commitments are only present // on mint transactions where as the old commitment is the last // commitment that was broadcast. + var spentCommitOp fn.Option[wire.OutPoint] oldCommitment.WhenSome(func(r RootCommitment) { logger.WhenSome(func(l btclog.Logger) { l.Infof("Re-using prior commitment as outpoint=%v: %v", @@ -492,6 +521,8 @@ func newRootCommitment(ctx context.Context, TaprootInternalKey: trBip32Derivation.XOnlyPubKey, TaprootMerkleRoot: commitTapscriptRoot, }) + + spentCommitOp = fn.Some(r.CommitPoint()) }) // TODO(roasbef): do CreateTaprootSignature instead? @@ -563,11 +594,12 @@ func newRootCommitment(ctx context.Context, // // TODO(roasbeef): use diff internal key? 
newSupplyCommit := RootCommitment{ - Txn: newCommitTx, - TxOutIdx: 0, - InternalKey: commitInternalKey, - OutputKey: tapOutKey, - SupplyRoot: newSupplyRoot, + Txn: newCommitTx, + TxOutIdx: 0, + InternalKey: commitInternalKey, + OutputKey: tapOutKey, + SupplyRoot: newSupplyRoot, + SpentCommitment: spentCommitOp, } logger.WhenSome(func(l btclog.Logger) { @@ -1065,13 +1097,68 @@ func (c *CommitFinalizeState) ProcessEvent(event Event, prefixedLog.Infof("Finalizing supply commitment transition") + // Insert the finalized supply transition into the remote + // universe server via the syncer. + chainProof, err := c.SupplyTransition.ChainProof.UnwrapOrErr( + fmt.Errorf("supply transition in finalize state " + + "must have chain proof"), + ) + if err != nil { + return nil, err + } + + // Retrieve latest canonical universe list from the latest + // metadata for the asset group. + metadata, err := FetchLatestAssetMetadata( + ctx, env.AssetLookup, env.AssetSpec, + ) + if err != nil { + return nil, fmt.Errorf("unable to fetch latest asset "+ + "metadata: %w", err) + } + + // Insert the supply commitment into the remote universes. This + // call should block until push is complete. + canonicalUniverses := metadata.CanonicalUniverses.UnwrapOr( + []url.URL{}, + ) + + supplyLeaves, err := NewSupplyLeavesFromEvents( + c.SupplyTransition.PendingUpdates, + ) + if err != nil { + return nil, fmt.Errorf("unable to create "+ + "supply leaves from pending updates: %w", err) + } + + serverErrors, err := env.SupplySyncer.PushSupplyCommitment( + ctx, env.AssetSpec, c.SupplyTransition.NewCommitment, + supplyLeaves, chainProof, canonicalUniverses, + ) + if err != nil { + return nil, fmt.Errorf("unable to insert "+ + "supply commitment into remote universe "+ + "server via syncer: %w", err) + } + + // Log any per-server errors but continue with the operation. + // + // TODO(ffranr): Handle the case where we fail to push to + // all servers. 
Also, if push fails because of + // ErrPrevCommitmentNotFound then we need to sync older + // commitments first. + for serverHost, serverErr := range serverErrors { + prefixedLog.Warnf("Failed to push supply commitment "+ + "to server %s: %v", serverHost, serverErr) + } + // At this point, the commitment has been confirmed on disk, so // we can update: the state machine state on disk, and swap in // all the new supply tree information. // // First, we'll update the supply state on disk. This way when // we restart his is idempotent. - err := env.StateLog.ApplyStateTransition( + err = env.StateLog.ApplyStateTransition( ctx, env.AssetSpec, c.SupplyTransition, ) if err != nil { diff --git a/universe/supplycommit/util.go b/universe/supplycommit/util.go new file mode 100644 index 000000000..51b16d6cf --- /dev/null +++ b/universe/supplycommit/util.go @@ -0,0 +1,68 @@ +package supplycommit + +import ( + "context" + "fmt" + + "github.com/lightningnetwork/lnd/fn/v2" +) + +// CalcTotalOutstandingSupply calculates the total outstanding supply from the +// given supply subtrees. +func CalcTotalOutstandingSupply(ctx context.Context, + supplySubtrees SupplyTrees) fn.Result[uint64] { + + var total uint64 + + // Add the total minted amount if we have a mint tree. + if mintTree, ok := supplySubtrees[MintTreeType]; ok { + root, err := mintTree.Root(ctx) + if err != nil { + return fn.Err[uint64](fmt.Errorf("unable to "+ + "extract mint tree root: %w", err)) + } + + total = root.NodeSum() + } + + // Return early if there's no minted supply, ignore the other subtrees. + if total == 0 { + return fn.Ok[uint64](0) + } + + // Subtract the total burned amount if we have a burn tree. 
+ if burnTree, ok := supplySubtrees[BurnTreeType]; ok { + root, err := burnTree.Root(ctx) + if err != nil { + return fn.Err[uint64](fmt.Errorf("unable to "+ + "extract burn tree root: %w", err)) + } + + burned := root.NodeSum() + if burned > total { + return fn.Err[uint64](fmt.Errorf("total burned %d "+ + "exceeds total outstanding %d", burned, total)) + } + + total -= burned + } + + // Subtract the total ignored amount if we have an ignore tree. + if ignoreTree, ok := supplySubtrees[IgnoreTreeType]; ok { + root, err := ignoreTree.Root(ctx) + if err != nil { + return fn.Err[uint64](fmt.Errorf("unable to "+ + "extract ignore tree root: %w", err)) + } + + ignored := root.NodeSum() + if ignored > total { + return fn.Err[uint64](fmt.Errorf("total ignored %d "+ + "exceeds total outstanding %d", ignored, total)) + } + + total -= ignored + } + + return fn.Ok[uint64](total) +} diff --git a/universe/supplycommit/util_test.go b/universe/supplycommit/util_test.go new file mode 100644 index 000000000..9a054c714 --- /dev/null +++ b/universe/supplycommit/util_test.go @@ -0,0 +1,186 @@ +package supplycommit + +import ( + "context" + "fmt" + "testing" + + "github.com/lightninglabs/taproot-assets/mssmt" + "github.com/stretchr/testify/require" +) + +// createTreeWithSum creates an in-memory mssmt tree with the specified sum. +// If sum is 0, returns an empty tree. +func createTreeWithSum(sum uint64) mssmt.Tree { + store := mssmt.NewDefaultStore() + tree := mssmt.NewCompactedTree(store) + + if sum > 0 { + // Insert a leaf with the desired sum. + // + // Use sum to create unique key. + key := [32]byte{byte(sum % 256)} + leaf := mssmt.NewLeafNode( + []byte(fmt.Sprintf("value-%d", sum)), sum, + ) + newTree, _ := tree.Insert(context.Background(), key, leaf) + return newTree + } + + return tree +} + +// TestCalcTotalOutstandingSupply tests the CalcTotalOutstandingSupply function +// with various combinations of supply trees. 
+func TestCalcTotalOutstandingSupply(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := []struct { + name string + supplyTrees SupplyTrees + expectedResult uint64 + expectedError string + }{ + { + name: "empty supply trees", + supplyTrees: SupplyTrees{}, + expectedResult: 0, + expectedError: "", + }, + { + name: "only mint tree with zero sum", + supplyTrees: SupplyTrees{ + MintTreeType: createTreeWithSum(0), + }, + expectedResult: 0, + expectedError: "", + }, + { + name: "only mint tree with positive sum", + supplyTrees: SupplyTrees{ + MintTreeType: createTreeWithSum(1000), + }, + expectedResult: 1000, + expectedError: "", + }, + { + name: "mint and burn trees", + supplyTrees: SupplyTrees{ + MintTreeType: createTreeWithSum(1000), + BurnTreeType: createTreeWithSum(300), + }, + expectedResult: 700, + expectedError: "", + }, + { + name: "mint and ignore trees", + supplyTrees: SupplyTrees{ + MintTreeType: createTreeWithSum(1000), + IgnoreTreeType: createTreeWithSum(200), + }, + expectedResult: 800, + expectedError: "", + }, + { + name: "all three tree types", + supplyTrees: SupplyTrees{ + MintTreeType: createTreeWithSum(1000), + BurnTreeType: createTreeWithSum(200), + IgnoreTreeType: createTreeWithSum(100), + }, + expectedResult: 700, + expectedError: "", + }, + { + name: "burned amount exceeds total minted", + supplyTrees: SupplyTrees{ + MintTreeType: createTreeWithSum(500), + BurnTreeType: createTreeWithSum(600), + }, + expectedResult: 0, + expectedError: "total burned 600 exceeds total " + + "outstanding 500", + }, + { + name: "ignored amount exceeds remaining supply", + supplyTrees: SupplyTrees{ + MintTreeType: createTreeWithSum(1000), + BurnTreeType: createTreeWithSum(200), + IgnoreTreeType: createTreeWithSum(900), + }, + expectedResult: 0, + expectedError: "total ignored 900 exceeds total " + + "outstanding 800", + }, + { + name: "burn exactly equals mint", + supplyTrees: SupplyTrees{ + MintTreeType: createTreeWithSum(500), + 
BurnTreeType: createTreeWithSum(500), + }, + expectedResult: 0, + expectedError: "", + }, + { + name: "ignore exactly equals remaining supply", + supplyTrees: SupplyTrees{ + MintTreeType: createTreeWithSum(1000), + BurnTreeType: createTreeWithSum(300), + IgnoreTreeType: createTreeWithSum(700), + }, + expectedResult: 0, + expectedError: "", + }, + { + name: "only burn tree (no mint)", + supplyTrees: SupplyTrees{ + BurnTreeType: createTreeWithSum(100), + }, + expectedResult: 0, + expectedError: "", + }, + { + name: "only ignore tree (no mint)", + supplyTrees: SupplyTrees{ + IgnoreTreeType: createTreeWithSum(100), + }, + expectedResult: 0, + expectedError: "", + }, + } + + for idx := range testCases { + tc := testCases[idx] + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + result := CalcTotalOutstandingSupply( + ctx, tc.supplyTrees, + ) + + if tc.expectedError != "" { + require.True( + t, result.IsErr(), + "expected error but got success", + ) + err := result.Err() + require.Contains( + t, err.Error(), tc.expectedError, + ) + + return + } + + require.True( + t, result.IsOk(), + "expected success but got error: %v", + result.Err(), + ) + actual := result.UnwrapOr(0) + require.Equal(t, tc.expectedResult, actual) + }) + } +} diff --git a/universe/supplyverifier/env.go b/universe/supplyverifier/env.go new file mode 100644 index 000000000..013ec3400 --- /dev/null +++ b/universe/supplyverifier/env.go @@ -0,0 +1,113 @@ +package supplyverifier + +import ( + "context" + "fmt" + + "github.com/btcsuite/btcd/wire" + "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/fn" + "github.com/lightninglabs/taproot-assets/mssmt" + "github.com/lightninglabs/taproot-assets/tapgarden" + "github.com/lightninglabs/taproot-assets/universe/supplycommit" + lfn "github.com/lightningnetwork/lnd/fn/v2" +) + +var ( + // ErrCommitmentNotFound is returned when a supply commitment is not + // found. 
var (
	// ErrCommitmentNotFound is returned when a supply commitment is not
	// found.
	ErrCommitmentNotFound = fmt.Errorf("commitment not found")

	// ErrPrevCommitmentNotFound is returned when we try to fetch a
	// previous supply commitment, but it is not found in the database.
	ErrPrevCommitmentNotFound = fmt.Errorf("previous supply commitment " +
		"not found")
)

// SupplyCommitView is an interface that is used to look up supply commitments
// and pre-commitments.
type SupplyCommitView interface {
	// UnspentPrecommits returns the set of unspent pre-commitments for a
	// given asset spec.
	UnspentPrecommits(ctx context.Context,
		assetSpec asset.Specifier) lfn.Result[supplycommit.PreCommits]

	// SupplyCommit returns the latest supply commitment for a given asset
	// spec.
	SupplyCommit(ctx context.Context,
		assetSpec asset.Specifier) supplycommit.RootCommitResp

	// FetchCommitmentByOutpoint fetches a supply commitment by its outpoint
	// and group key. If no commitment is found, it returns
	// ErrCommitmentNotFound.
	FetchCommitmentByOutpoint(ctx context.Context,
		assetSpec asset.Specifier,
		outpoint wire.OutPoint) (*supplycommit.RootCommitment, error)

	// FetchCommitmentBySpentOutpoint fetches a supply commitment by the
	// outpoint it spent and group key. If no commitment is found, it
	// returns ErrCommitmentNotFound.
	FetchCommitmentBySpentOutpoint(ctx context.Context,
		assetSpec asset.Specifier,
		spentOutpoint wire.OutPoint) (*supplycommit.RootCommitment,
		error)

	// FetchStartingCommitment fetches the very first supply commitment of
	// an asset group. If no commitment is found, it returns
	// ErrCommitmentNotFound.
	FetchStartingCommitment(ctx context.Context,
		assetSpec asset.Specifier) (*supplycommit.RootCommitment, error)

	// InsertSupplyCommit inserts a supply commitment into the database.
	InsertSupplyCommit(ctx context.Context,
		assetSpec asset.Specifier, commit supplycommit.RootCommitment,
		leaves supplycommit.SupplyLeaves) error
}

// SupplyTreeView is an interface that is used to look up the root (upper)
// supply tree, subtrees, and leaves.
//
// nolint: lll
type SupplyTreeView interface {
	// FetchSupplyTrees returns a copy of the root supply tree and subtrees
	// for the given asset spec.
	FetchSupplyTrees(ctx context.Context, spec asset.Specifier) (mssmt.Tree,
		*supplycommit.SupplyTrees, error)

	// FetchSubTrees returns all the subtrees for the given asset spec.
	// When blockHeightEnd is set, only leaves up to and including that
	// height are reflected in the returned subtrees.
	FetchSubTrees(ctx context.Context, assetSpec asset.Specifier,
		blockHeightEnd fn.Option[uint32]) lfn.Result[supplycommit.SupplyTrees]

	// FetchSupplyLeavesByHeight fetches all supply leaves for a given asset
	// specifier within a given block height range.
	//
	// NOTE(review): whether startHeight is inclusive or exclusive is not
	// visible from this interface — confirm against the implementation, as
	// callers in this package depend on it (see fetchCommitmentBlockRange).
	FetchSupplyLeavesByHeight(ctx context.Context, spec asset.Specifier,
		startHeight,
		endHeight uint32) lfn.Result[supplycommit.SupplyLeaves]
}

// Environment is a struct that holds all the dependencies that the supply
// verifier needs to carry out its duties.
type Environment struct {
	// AssetSpec is the asset specifier that is used to identify the asset
	// that we're maintaining a supply commit for.
	AssetSpec asset.Specifier

	// Chain is our access to the current main chain.
	Chain tapgarden.ChainBridge

	// SupplyCommitView allows us to look up supply commitments and
	// pre-commitments.
	SupplyCommitView SupplyCommitView

	// ErrChan is the channel that is used to send errors to the caller.
	//
	// NOTE(review): this channel appears to be shared across all per-group
	// state machines (it comes from ManagerCfg) — confirm the receiver can
	// attribute an error to the originating asset group.
	ErrChan chan<- error

	// QuitChan is the channel that is used to signal that the state
	// machine should quit.
	QuitChan <-chan struct{}
}
// Name returns the name of the environment, used to identify this state
// machine instance in logs.
func (e *Environment) Name() string {
	return fmt.Sprintf("supply_verifier(%s)", e.AssetSpec.String())
}

// --- universe/supplyverifier/log.go ---

// Subsystem defines the logging code for this subsystem.
const Subsystem = "SUPV"

// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log = btclog.Disabled

// DisableLog disables all library log output. Logging output is disabled
// by default until UseLogger is called.
func DisableLog() {
	UseLogger(btclog.Disabled)
}

// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
	log = logger
}
const (
	// DefaultTimeout is the context guard default timeout, applied to any
	// guarded context created without an explicit deadline.
	DefaultTimeout = 30 * time.Second
)

// DaemonAdapters is a wrapper around the protofsm.DaemonAdapters interface
// with the addition of Start and Stop methods.
type DaemonAdapters interface {
	protofsm.DaemonAdapters

	// Start starts the daemon adapters handler service.
	Start() error

	// Stop stops the daemon adapters handler service.
	Stop() error
}

// StateMachineStore is an interface that allows the state machine to persist
// its state across restarts. This is used to track the state of the state
// machine for supply verification.
type StateMachineStore interface {
	// CommitState is used to commit the state of the state machine to disk.
	CommitState(context.Context, asset.Specifier, State) error

	// FetchState attempts to fetch the state of the state machine for the
	// target asset specifier. If the state machine doesn't exist, then a
	// default state will be returned.
	FetchState(context.Context, asset.Specifier) (State, error)
}

// IssuanceSubscriptions allows verifier state machines to subscribe to
// asset group issuance events.
type IssuanceSubscriptions interface {
	// RegisterSubscriber registers an event receiver to receive future
	// issuance events. The second parameter controls whether existing
	// events are also delivered; the third is unused here.
	RegisterSubscriber(receiver *fn.EventReceiver[fn.Event],
		deliverExisting bool, _ bool) error
}

// ManagerCfg is the configuration for the
// Manager. It contains all the dependencies needed to
// manage multiple supply verifier state machines, one for each asset group.
type ManagerCfg struct {
	// Chain is our access to the current main chain.
	Chain tapgarden.ChainBridge

	// AssetLookup is used to look up asset information such as asset groups
	// and asset metadata.
	AssetLookup supplycommit.AssetLookup

	// Lnd is a collection of useful LND clients.
	Lnd *lndclient.LndServices

	// SupplyCommitView allows us to look up supply commitments and
	// pre-commitments.
	SupplyCommitView SupplyCommitView

	// SupplyTreeView is used to fetch supply leaves by height.
	SupplyTreeView SupplyTreeView

	// GroupFetcher is used to fetch asset group information.
	GroupFetcher tapgarden.GroupFetcher

	// IssuanceSubscriptions registers verifier state machines to receive
	// new asset group issuance event notifications.
	IssuanceSubscriptions IssuanceSubscriptions

	// DaemonAdapters is a set of adapters that allow the state machine to
	// interact with external daemons whilst processing internal events.
	DaemonAdapters DaemonAdapters

	// StateLog is the main state log that is used to track the state of the
	// state machine. This is used to persist the state of the state machine
	// across restarts.
	StateLog StateMachineStore

	// ErrChan is the channel that is used to send errors to the caller.
	// It is shared by every state machine the manager creates.
	ErrChan chan<- error
}

// Manager is a manager for multiple supply verifier state machines, one for
// each asset group. It is responsible for starting and stopping the state
// machines, as well as forwarding events to them.
type Manager struct {
	// cfg is the configuration for the multi state machine manager.
	cfg ManagerCfg

	// smCache is a cache that maps asset group public keys to their
	// supply verifier state machines.
	smCache *stateMachineCache

	// ContextGuard provides a wait group and main quit channel that can be
	// used to create guarded contexts.
	*fn.ContextGuard

	// startOnce/stopOnce guarantee Start and Stop are idempotent.
	startOnce sync.Once
	stopOnce  sync.Once
}

// NewManager creates a new multi state machine manager.
func NewManager(cfg ManagerCfg) *Manager {
	return &Manager{
		cfg: cfg,
		ContextGuard: &fn.ContextGuard{
			DefaultTimeout: DefaultTimeout,
			Quit:           make(chan struct{}),
		},
	}
}

// Start starts the multi state machine manager. Individual state machines are
// created lazily on first use (see fetchStateMachine), so the only work here
// is initializing the cache.
func (m *Manager) Start() error {
	m.startOnce.Do(func() {
		// Initialize the state machine cache.
		m.smCache = newStateMachineCache()
	})

	return nil
}
+ m.smCache = newStateMachineCache() + }) + + return nil +} + +// Stop stops the multi state machine manager, which in turn stops all asset +// group key specific supply verifier state machines. +func (m *Manager) Stop() error { + m.stopOnce.Do(func() { + // Cancel the state machine context to signal all state machines + // to stop. + close(m.Quit) + + // Stop all state machines. + m.smCache.StopAll() + }) + + return nil +} + +// fetchStateMachine retrieves a state machine from the cache or creates a +// new one if it doesn't exist. If a new state machine is created, it is also +// started. +func (m *Manager) fetchStateMachine(assetSpec asset.Specifier) (*StateMachine, + error) { + + groupKey, err := assetSpec.UnwrapGroupKeyOrErr() + if err != nil { + return nil, fmt.Errorf("asset specifier missing group key: %w", + err) + } + + // Check if the state machine for the asset group already exists in the + // cache. + sm, ok := m.smCache.Get(*groupKey) + if ok { + return sm, nil + } + + // If the state machine is not found, create a new one. + env := &Environment{ + AssetSpec: assetSpec, + Chain: m.cfg.Chain, + SupplyCommitView: m.cfg.SupplyCommitView, + ErrChan: m.cfg.ErrChan, + QuitChan: m.Quit, + } + + // Before we start the state machine, we'll need to fetch the current + // state from disk, to see if we need to emit any new events. + ctx, cancel := m.WithCtxQuitNoTimeout() + defer cancel() + + initialState, err := m.cfg.StateLog.FetchState(ctx, assetSpec) + if err != nil { + return nil, fmt.Errorf("unable to fetch current state: %w", err) + } + + // Create a new error reporter for the state machine. + errorReporter := NewErrorReporter(assetSpec) + + fsmCfg := protofsm.StateMachineCfg[Event, *Environment]{ + ErrorReporter: &errorReporter, + InitialState: initialState, + Env: env, + Daemon: m.cfg.DaemonAdapters, + } + newSm := protofsm.NewStateMachine[Event, *Environment](fsmCfg) + + // Ensure that the state machine is running. 
We use the manager's + // context guard to derive a sub context which will be cancelled when + // the manager is stopped. + smCtx, _ := m.WithCtxQuitNoTimeout() + newSm.Start(smCtx) + + // For supply verifier, we always start with an InitEvent to begin + // the verification process. + newSm.SendEvent(ctx, &InitEvent{}) + + m.smCache.Set(*groupKey, &newSm) + + return &newSm, nil +} + +// InsertSupplyCommit stores a verified supply commitment for the given asset +// group in the node's local database. +func (m *Manager) InsertSupplyCommit(ctx context.Context, + assetSpec asset.Specifier, commitment supplycommit.RootCommitment, + leaves supplycommit.SupplyLeaves) error { + + // First, we verify the supply commitment to ensure it is valid and + // consistent with the given supply leaves. + verifier, err := NewVerifier( + VerifierCfg{ + ChainBridge: m.cfg.Chain, + AssetLookup: m.cfg.AssetLookup, + Lnd: m.cfg.Lnd, + GroupFetcher: m.cfg.GroupFetcher, + SupplyCommitView: m.cfg.SupplyCommitView, + SupplyTreeView: m.cfg.SupplyTreeView, + }, + ) + if err != nil { + return fmt.Errorf("unable to create supply verifier: %w", err) + } + + err = verifier.VerifyCommit(ctx, assetSpec, commitment, leaves) + if err != nil { + return fmt.Errorf("supply commitment verification failed: %w", + err) + } + + return m.cfg.SupplyCommitView.InsertSupplyCommit( + ctx, assetSpec, commitment, leaves, + ) +} + +// SupplyCommitSnapshot packages the on-chain state of a supply commitment at a +// specific block height: the root commitment, the supply tree, +// the subtrees at that height, the new leaves since the previous commitment, +// and the chain proof that links the leaves to the root. +// +// TODO(guggero): Replace call sites that pass three separate params with +// this struct. +type SupplyCommitSnapshot struct { + // Commitment is the root supply commitment that commits to all supply + // leaves up to the block height recorded in CommitmentBlock. 
// SupplyCommitSnapshot packages the on-chain state of a supply commitment at a
// specific block height: the root commitment, the supply tree, the subtrees at
// that height, the new leaves since the previous commitment, and the chain
// proof that links the leaves to the root.
//
// TODO(guggero): Replace call sites that pass three separate params with
// this struct.
type SupplyCommitSnapshot struct {
	// Commitment is the root supply commitment that commits to all supply
	// leaves up to the block height recorded in CommitmentBlock.
	Commitment supplycommit.RootCommitment

	// SupplyTree is the upper supply tree as of CommitmentBlock.
	SupplyTree mssmt.Tree

	// Subtrees are the supply subtrees as of CommitmentBlock.
	Subtrees supplycommit.SupplyTrees

	// Leaves are the supply leaves added after the previous commitment's
	// block height (exclusive) and up to this commitment's block height
	// (inclusive).
	Leaves supplycommit.SupplyLeaves
}

// LocatorType is an enum that indicates the type of locator used to identify
// a supply commitment in the database.
type LocatorType uint8

const (
	// LocatorTypeOutpoint indicates that the locator type is the outpoint
	// of a supply commitment transaction output.
	LocatorTypeOutpoint LocatorType = 0

	// LocatorTypeSpentOutpoint indicates that the locator type is the
	// outpoint spent by a supply commitment transaction.
	LocatorTypeSpentOutpoint LocatorType = 1

	// LocatorTypeVeryFirst indicates that the locator type is the very
	// first supply commitment transaction output for an asset group.
	LocatorTypeVeryFirst LocatorType = 2
)

// CommitLocator is used to locate a supply commitment in the database based on
// its on-chain characteristics.
type CommitLocator struct {
	// LocatorType indicates the type of locator used to identify the
	// supply commitment.
	LocatorType LocatorType

	// Outpoint is the outpoint used to locate a supply commitment.
	// Depending on the LocatorType, this may be the outpoint created by a
	// supply commitment, the outpoint spent by a supply commitment, or an
	// empty outpoint for the very first supply commitment of an asset
	// group (in which case it is ignored).
	Outpoint wire.OutPoint
}
// BlockHeightRange represents a range of block heights, inclusive of both
// start and end.
type BlockHeightRange struct {
	// Start is the starting block height of the range.
	Start uint32

	// End is the ending block height of the range.
	End uint32
}

// fetchCommitmentBlockRange returns the block height range for fetching supply
// leaves for the given commitment.
//
// The range starts from the block height of the previous commitment
// (exclusive) to the block height of the given commitment (inclusive). If
// there is no previous commitment, the range starts from block height zero.
//
// NOTE(review): the code sets Start to the previous commitment's height
// itself, while this doc (and SupplyCommitSnapshot.Leaves) describe the
// previous height as exclusive. Confirm FetchSupplyLeavesByHeight treats
// Start as exclusive; otherwise leaves at exactly that height would be
// double counted across consecutive commitments.
func (m *Manager) fetchCommitmentBlockRange(ctx context.Context,
	assetSpec asset.Specifier,
	commitment supplycommit.RootCommitment) (BlockHeightRange, error) {

	var (
		zero BlockHeightRange
		view = m.cfg.SupplyCommitView
	)

	commitmentBlock, err := commitment.CommitmentBlock.UnwrapOrErr(
		supplycommit.ErrNoBlockInfo,
	)
	if err != nil {
		return zero, fmt.Errorf("unable to fetch commitment block: %w",
			err)
	}

	// Determine the block height range for fetching supply leaves.
	//
	// If there is no preceding commitment, the block height range starts
	// from zero.
	if commitment.SpentCommitment.IsNone() {
		heightRange := BlockHeightRange{
			Start: 0,
			End:   commitmentBlock.Height,
		}

		return heightRange, nil
	}

	// Otherwise, we need to fetch the previous commitment to determine
	// the starting block height.
	prevCommitmentOutPoint, err := commitment.SpentCommitment.UnwrapOrErr(
		fmt.Errorf("supply commitment unexpectedly has no spent " +
			"outpoint"),
	)
	if err != nil {
		return zero, err
	}

	spentCommitment, err := view.FetchCommitmentByOutpoint(
		ctx, assetSpec, prevCommitmentOutPoint,
	)
	if err != nil {
		return zero, fmt.Errorf("unable to fetch commitment by "+
			"outpoint: %w", err)
	}

	spentCommitmentBlock, err := spentCommitment.CommitmentBlock.
		UnwrapOrErr(supplycommit.ErrNoBlockInfo)
	if err != nil {
		return zero, fmt.Errorf("unable to fetch spent commitment "+
			"block: %w", err)
	}

	return BlockHeightRange{
		Start: spentCommitmentBlock.Height,
		End:   commitmentBlock.Height,
	}, nil
}

// FetchCommitment fetches the commitment with the given locator from the local
// database view, rebuilds the supply tree for its height range, sanity-checks
// the rebuilt root against the commitment's root, and returns the full
// snapshot.
func (m *Manager) FetchCommitment(ctx context.Context,
	assetSpec asset.Specifier, locator CommitLocator) (SupplyCommitSnapshot,
	error) {

	var (
		zero SupplyCommitSnapshot
		err  error

		view       = m.cfg.SupplyCommitView
		commitment *supplycommit.RootCommitment
	)

	// Resolve the commitment via whichever on-chain characteristic the
	// locator specifies.
	switch locator.LocatorType {
	case LocatorTypeOutpoint:
		commitment, err = view.FetchCommitmentByOutpoint(
			ctx, assetSpec, locator.Outpoint,
		)
		if err != nil {
			return zero, fmt.Errorf("unable to fetch commitment "+
				"by outpoint: %w", err)
		}

	case LocatorTypeSpentOutpoint:
		commitment, err = view.FetchCommitmentBySpentOutpoint(
			ctx, assetSpec, locator.Outpoint,
		)
		if err != nil {
			return zero, fmt.Errorf("unable to fetch commitment "+
				"by spent outpoint: %w", err)
		}

	case LocatorTypeVeryFirst:
		commitment, err = view.FetchStartingCommitment(ctx, assetSpec)
		if err != nil {
			return zero, fmt.Errorf("unable to fetch starting "+
				"commitment: %w", err)
		}

	default:
		return zero, fmt.Errorf("unknown supply commit locator "+
			"type: %d", locator.LocatorType)
	}

	// Fetch block height range for fetching supply leaves.
	blockHeightRange, err := m.fetchCommitmentBlockRange(
		ctx, assetSpec, *commitment,
	)
	if err != nil {
		return zero, fmt.Errorf("unable to fetch block height "+
			"range: %w", err)
	}

	leaves, err := m.cfg.SupplyTreeView.FetchSupplyLeavesByHeight(
		ctx, assetSpec, blockHeightRange.Start, blockHeightRange.End,
	).Unpack()
	if err != nil {
		return zero, fmt.Errorf("unable to fetch supply leaves for "+
			"asset specifier %s: %w", assetSpec.String(), err)
	}

	// Fetch supply subtrees at block height.
	subtrees, err := m.cfg.SupplyTreeView.FetchSubTrees(
		ctx, assetSpec, fn.Some(blockHeightRange.End),
	).Unpack()
	if err != nil {
		return zero, fmt.Errorf("unable to fetch supply subtrees for "+
			"asset specifier %s: %w", assetSpec.String(), err)
	}

	// Formulate supply tree at correct height from subtrees.
	bareSupplyTree := mssmt.NewCompactedTree(mssmt.NewDefaultStore())
	supplyTree, err := supplycommit.UpdateRootSupplyTree(
		ctx, bareSupplyTree, subtrees,
	)
	if err != nil {
		return zero, fmt.Errorf("unable to formulate supply tree "+
			"for asset specifier %s: %w", assetSpec.String(), err)
	}

	// Sanity check that the derived upper supply tree root matches the
	// commitment.
	expectedSupplyRoot, err := supplyTree.Root(ctx)
	if err != nil {
		return zero, fmt.Errorf("unable to fetch upper supply tree "+
			"root for asset specifier %s: %w",
			assetSpec.String(), err)
	}

	expectedRootHash := expectedSupplyRoot.NodeHash()
	actualRootHash := commitment.SupplyRoot.NodeHash()
	if expectedRootHash != actualRootHash {
		return zero, fmt.Errorf("supply root mismatch for asset "+
			"specifier %s: expected %s, got %s",
			assetSpec.String(), expectedRootHash, actualRootHash)
	}

	return SupplyCommitSnapshot{
		Commitment: *commitment,
		SupplyTree: supplyTree,
		Subtrees:   subtrees,
		Leaves:     leaves,
	}, nil
}

// CanHandle determines if the state machine associated with the given asset
// specifier can handle the given message. If a state machine for the asset
// group does not exist, it will be created and started as a side effect.
func (m *Manager) CanHandle(assetSpec asset.Specifier,
	msg msgmux.PeerMsg) (bool, error) {

	sm, err := m.fetchStateMachine(assetSpec)
	if err != nil {
		return false, fmt.Errorf("unable to get or create state "+
			"machine: %w", err)
	}

	return sm.CanHandle(msg), nil
}

// Name returns the name of the state machine associated with the given asset
// specifier. If a state machine for the asset group does not exist, it will be
// created and started as a side effect.
func (m *Manager) Name(assetSpec asset.Specifier) (string, error) {
	sm, err := m.fetchStateMachine(assetSpec)
	if err != nil {
		return "", fmt.Errorf("unable to get or create state "+
			"machine: %w", err)
	}

	return sm.Name(), nil
}
// SendMessage sends a message to the state machine associated with the given
// asset specifier. If a state machine for the asset group does not exist, it
// will be created and started as a side effect.
func (m *Manager) SendMessage(ctx context.Context,
	assetSpec asset.Specifier, msg msgmux.PeerMsg) (bool, error) {

	sm, err := m.fetchStateMachine(assetSpec)
	if err != nil {
		return false, fmt.Errorf("unable to get or create state "+
			"machine: %w", err)
	}

	return sm.SendMessage(ctx, msg), nil
}

// CurrentState returns the current state of the state machine associated with
// the given asset specifier. If a state machine for the asset group does not
// exist, it will be created and started as a side effect.
func (m *Manager) CurrentState(assetSpec asset.Specifier) (
	protofsm.State[Event, *Environment], error) {

	sm, err := m.fetchStateMachine(assetSpec)
	if err != nil {
		return nil, fmt.Errorf("unable to get or create state "+
			"machine: %w", err)
	}

	return sm.CurrentState()
}

// RegisterStateEvents registers a state event subscriber with the state machine
// associated with the given asset specifier. If a state machine for the asset
// group does not exist, it will be created and started as a side effect.
func (m *Manager) RegisterStateEvents(
	assetSpec asset.Specifier) (StateSub, error) {

	sm, err := m.fetchStateMachine(assetSpec)
	if err != nil {
		return nil, fmt.Errorf("unable to get or create state "+
			"machine: %w", err)
	}

	return sm.RegisterStateEvents(), nil
}

// RemoveStateSub removes a state event subscriber from the state machine
// associated with the given asset specifier. If a state machine for the asset
// group does not exist, it will be created and started as a side effect.
func (m *Manager) RemoveStateSub(assetSpec asset.Specifier,
	sub StateSub) error {

	sm, err := m.fetchStateMachine(assetSpec)
	if err != nil {
		return fmt.Errorf("unable to get or create state "+
			"machine: %w", err)
	}

	sm.RemoveStateSub(sub)

	return nil
}

// stateMachineCache is a thread-safe cache mapping an asset group's public key
// to its supply verifier state machine.
+type stateMachineCache struct { + // mu is a mutex that is used to synchronize access to the cache. + mu sync.RWMutex + + // cache is a map of serialized asset group public keys to their + // supply verifier state machines. + cache map[asset.SerializedKey]*StateMachine +} + +// newStateMachineCache creates a new supply verifier state machine cache. +func newStateMachineCache() *stateMachineCache { + return &stateMachineCache{ + cache: make(map[asset.SerializedKey]*StateMachine), + } +} + +// StopAll stops all state machines in the cache. +func (c *stateMachineCache) StopAll() { + c.mu.RLock() + defer c.mu.RUnlock() + + // Iterate over the cache and append each state machine to the slice. + for _, sm := range c.cache { + // Sanity check: ensure sm is not nil. + if sm == nil { + continue + } + + // Stop the state machine. + sm.Stop() + } +} + +// Get retrieves a state machine from the cache. +func (c *stateMachineCache) Get(groupPubKey btcec.PublicKey) (*StateMachine, + bool) { + + // Serialize the group key. + serializedGroupKey := asset.ToSerialized(&groupPubKey) + + c.mu.RLock() + defer c.mu.RUnlock() + + sm, ok := c.cache[serializedGroupKey] + return sm, ok +} + +// Set adds a state machine to the cache. +func (c *stateMachineCache) Set(groupPubKey btcec.PublicKey, sm *StateMachine) { + // Serialize the group key. + serializedGroupKey := asset.ToSerialized(&groupPubKey) + + c.mu.Lock() + defer c.mu.Unlock() + + // If the state machine already exists, return without updating it. + // This helps to ensure that we always have a pointer to every state + // machine in the cache, even if it is not currently active. + if _, exists := c.cache[serializedGroupKey]; exists { + return + } + + c.cache[serializedGroupKey] = sm +} + +// Delete removes a state machine from the cache. +func (c *stateMachineCache) Delete(groupPubKey btcec.PublicKey) { + // Serialize the group key. 
+ serializedGroupKey := asset.ToSerialized(&groupPubKey) + + c.mu.Lock() + defer c.mu.Unlock() + + delete(c.cache, serializedGroupKey) +} + +// ErrorReporter is an asset specific error reporter that can be used to +// report errors that occur during the operation of the asset group supply +// verifier state machine. +type ErrorReporter struct { + // assetSpec is the asset specifier that identifies the asset group. + assetSpec asset.Specifier +} + +// NewErrorReporter creates a new ErrorReporter for the given asset specifier +// state machine. +func NewErrorReporter(assetSpec asset.Specifier) ErrorReporter { + return ErrorReporter{ + assetSpec: assetSpec, + } +} + +// ReportError reports an error that occurred during the operation of the +// asset group supply verifier state machine. +func (r *ErrorReporter) ReportError(err error) { + log.Errorf("supply verifier state machine (asset_spec=%s): %v", + r.assetSpec.String(), err) +} diff --git a/universe/supplyverifier/states.go b/universe/supplyverifier/states.go new file mode 100644 index 000000000..1bc5f04a1 --- /dev/null +++ b/universe/supplyverifier/states.go @@ -0,0 +1,62 @@ +package supplyverifier + +import ( + "fmt" + + "github.com/lightningnetwork/lnd/protofsm" +) + +var ( + // ErrInvalidStateTransition is returned when we receive an unexpected + // event for a given state. + ErrInvalidStateTransition = fmt.Errorf("invalid state transition") +) + +// Event is a special interface used to create the equivalent of a sum-type, but +// using a "sealed" interface. +type Event interface { + eventSealed() +} + +// Events is a special type constraint that enumerates all the possible protocol +// events. +type Events interface { +} + +// StateTransition is the StateTransition type specific to the supply verifier +// state machine. +type StateTransition = protofsm.StateTransition[Event, *Environment] + +// State is our sum-type ish interface that represents the current universe +// commitment verification state. 
// State is our sum-type ish interface that represents the current universe
// commitment verification state.
type State interface {
	// stateSealed is a special method that is used to seal the interface.
	stateSealed()

	// IsTerminal reports whether this is a final state of the machine.
	IsTerminal() bool

	// ProcessEvent applies an incoming event to this state, yielding the
	// resulting state transition or an error (e.g.
	// ErrInvalidStateTransition).
	ProcessEvent(Event, *Environment) (*StateTransition, error)

	// String returns a human-readable name for the state.
	String() string
}

// StateMachine is a state machine that handles verifying the on-chain supply
// commitment for a given asset.
type StateMachine = protofsm.StateMachine[Event, *Environment]

// Config is a configuration struct that is used to initialize a new supply
// verifier state machine.
type Config = protofsm.StateMachineCfg[Event, *Environment]

// FsmState is a type alias for the state of the supply verifier state machine.
type FsmState = protofsm.State[Event, *Environment]

// FsmEvent is a type alias for the event type of the supply verifier state
// machine.
type FsmEvent = protofsm.EmittedEvent[Event]

// StateSub is a type alias for the state subscriber of the supply verifier
// state machine.
type StateSub = protofsm.StateSubscriber[Event, *Environment]

// InitEvent is the first event that is sent to the state machine.
type InitEvent struct{}

// eventSealed is a special method that is used to seal the interface.
func (i *InitEvent) eventSealed() {}
+ InsertSupplyCommit(ctx context.Context, assetSpec asset.Specifier, + commitment supplycommit.RootCommitment, + updateLeaves supplycommit.SupplyLeaves, + chainProof supplycommit.ChainProof) error + + // Close closes the fetcher and cleans up any resources. + Close() error +} + +// UniverseClientFactory is a function type that creates UniverseClient +// instances for a given universe server address. +type UniverseClientFactory func(serverAddr universe.ServerAddr) (UniverseClient, + error) + +// SupplySyncerStore is an interface for storing synced leaves and state. +type SupplySyncerStore interface { + // LogSupplyCommitPush logs that a supply commitment and its leaves + // have been successfully pushed to a remote universe server. + LogSupplyCommitPush(ctx context.Context, serverAddr universe.ServerAddr, + assetSpec asset.Specifier, + commitment supplycommit.RootCommitment, + leaves supplycommit.SupplyLeaves) error +} + +// UniverseFederationView is an interface that provides a view of the +// federation of universe servers. +type UniverseFederationView interface { + // UniverseServers returns a list of all known universe servers in + // the federation. + UniverseServers(ctx context.Context) ([]universe.ServerAddr, error) +} + +// SupplySyncerConfig is a configuration struct for creating a new +// SupplySyncer instance. +type SupplySyncerConfig struct { + // ClientFactory is a factory function that creates UniverseClient + // instances for specific universe server addresses. + ClientFactory UniverseClientFactory + + // Store is used to persist supply leaves to the local database. + Store SupplySyncerStore + + // UniverseFederationView is used to fetch the list of known + // universe servers in the federation. + UniverseFederationView UniverseFederationView +} + +// SupplySyncer is a struct that is responsible for retrieving supply leaves +// from a universe. +type SupplySyncer struct { + // cfg is the configuration for the SupplySyncer. 
+ cfg SupplySyncerConfig +} + +// NewSupplySyncer creates a new SupplySyncer with a factory function for +// creating UniverseClient instances and a store for persisting leaves. +func NewSupplySyncer(cfg SupplySyncerConfig) SupplySyncer { + return SupplySyncer{ + cfg: cfg, + } +} + +// pushUniServer pushes the supply commitment to a specific universe server. +func (s *SupplySyncer) pushUniServer(ctx context.Context, + assetSpec asset.Specifier, commitment supplycommit.RootCommitment, + updateLeaves supplycommit.SupplyLeaves, + chainProof supplycommit.ChainProof, + serverAddr universe.ServerAddr) error { + + // Create a client for the specific universe server address. + client, err := s.cfg.ClientFactory(serverAddr) + if err != nil { + return fmt.Errorf("unable to create universe client: %w", err) + } + + // Ensure the client is properly closed when we're done. + defer func() { + if closeErr := client.Close(); closeErr != nil { + log.Errorf("unable to close universe client: %v", + closeErr) + } + }() + + err = client.InsertSupplyCommit( + ctx, assetSpec, commitment, updateLeaves, chainProof, + ) + if err != nil { + return fmt.Errorf("unable to insert supply commitment: %w", err) + } + + // Log the successful insertion to the remote universe. + err = s.cfg.Store.LogSupplyCommitPush( + ctx, serverAddr, assetSpec, commitment, updateLeaves, + ) + if err != nil { + return fmt.Errorf("unable to log supply commit push: %w", err) + } + + return nil +} + +// fetchServerAddrs retrieves the list of universe server addresses that +// the syncer uses to interact with remote servers. +func (s *SupplySyncer) fetchServerAddrs(ctx context.Context, + canonicalUniverses []url.URL) ([]universe.ServerAddr, error) { + + var zero []universe.ServerAddr + + // Fetch latest set of universe federation server addresses.
+ fedAddrs, err := s.cfg.UniverseFederationView.UniverseServers(ctx) + if err != nil { + return zero, fmt.Errorf("unable to fetch universe servers: %w", + err) + } + + // Formulate final unique list of universe server addresses to push to. + uniqueAddrs := make(map[string]universe.ServerAddr) + for idx := range canonicalUniverses { + addrUrl := canonicalUniverses[idx] + serverAddr := universe.NewServerAddrFromStr(addrUrl.String()) + uniqueAddrs[serverAddr.HostStr()] = serverAddr + } + + for idx := range fedAddrs { + serverAddr := fedAddrs[idx] + uniqueAddrs[serverAddr.HostStr()] = serverAddr + } + + targetAddrs := make([]universe.ServerAddr, 0, len(uniqueAddrs)) + for _, serverAddr := range uniqueAddrs { + targetAddrs = append(targetAddrs, serverAddr) + } + + return targetAddrs, nil +} + +// PushSupplyCommitment pushes a supply commitment to the remote universe +// server. This function should block until the sync insertion is complete. +// +// Returns a map of per-server errors keyed by server host string and +// an internal error. If all pushes succeed, both return values are nil. +// If some pushes fail, the map contains only the failed servers and +// their corresponding errors. If there's an internal/system error that +// prevents the operation from proceeding, it's returned as the second +// value. +// +// NOTE: This function must be thread safe. +func (s *SupplySyncer) PushSupplyCommitment(ctx context.Context, + assetSpec asset.Specifier, commitment supplycommit.RootCommitment, + updateLeaves supplycommit.SupplyLeaves, + chainProof supplycommit.ChainProof, + canonicalUniverses []url.URL) (map[string]error, error) { + + targetAddrs, err := s.fetchServerAddrs(ctx, canonicalUniverses) + if err != nil { + // This is an internal error that prevents the operation from + // proceeding. + return nil, fmt.Errorf("unable to fetch target universe "+ + "server addresses: %w", err) + } + + // Push the supply commitment to all target universe servers in + // parallel. 
Any error for a specific server will be captured in the + // pushErrs map and will not abort the entire operation. + pushErrs, err := fn.ParSliceErrCollect( + ctx, targetAddrs, func(ctx context.Context, + serverAddr universe.ServerAddr) error { + + // Push the supply commitment to the universe server. + err := s.pushUniServer( + ctx, assetSpec, commitment, updateLeaves, + chainProof, serverAddr, + ) + if err != nil { + return fmt.Errorf("unable to push supply "+ + "commitment (server_addr_id=%d, "+ + "server_addr_host_str=%s): %w", + serverAddr.ID, serverAddr.HostStr(), + err) + } + + return nil + }, + ) + if err != nil { + // This should not happen with ParSliceErrCollect, but handle it + // as an internal error. + return nil, fmt.Errorf("unable to push supply commitment: %w", + err) + } + + // Build a map of errors encountered while pushing to each server. + // If there were no errors, return nil for both values. + if len(pushErrs) == 0 { + return nil, nil + } + + errorMap := make(map[string]error) + for idx, fetchErr := range pushErrs { + serverAddr := targetAddrs[idx] + hostStr := serverAddr.HostStr() + errorMap[hostStr] = fetchErr + } + + return errorMap, nil +} diff --git a/universe/supplyverifier/verifier.go b/universe/supplyverifier/verifier.go new file mode 100644 index 000000000..58894c4ff --- /dev/null +++ b/universe/supplyverifier/verifier.go @@ -0,0 +1,641 @@ +package supplyverifier + +import ( + "bytes" + "context" + "errors" + "fmt" + "strings" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/lightninglabs/lndclient" + "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/mssmt" + "github.com/lightninglabs/taproot-assets/proof" + "github.com/lightninglabs/taproot-assets/tapgarden" + "github.com/lightninglabs/taproot-assets/universe/supplycommit" +) + +// VerifierCfg is the configuration for the verifier. +type VerifierCfg struct { + // ChainBridge is our access to the chain.
+ ChainBridge tapgarden.ChainBridge + + // AssetLookup is used to look up asset information such as asset groups + // and asset metadata. + AssetLookup supplycommit.AssetLookup + + // Lnd is a collection of useful LND clients. + Lnd *lndclient.LndServices + + // GroupFetcher is used to fetch asset groups. + GroupFetcher tapgarden.GroupFetcher + + // SupplyCommitView allows us to look up supply commitments and + // pre-commitments. + SupplyCommitView SupplyCommitView + + // SupplyTreeView is used to fetch supply leaves by height. + SupplyTreeView SupplyTreeView +} + +// Validate performs basic validation on the verifier configuration. +func (v *VerifierCfg) Validate() error { + if v.ChainBridge == nil { + return fmt.Errorf("chain bridge is required") + } + + if v.AssetLookup == nil { + return fmt.Errorf("asset lookup is required") + } + + if v.Lnd == nil { + return fmt.Errorf("lnd services is required") + } + + if v.GroupFetcher == nil { + return fmt.Errorf("group fetcher is required") + } + + if v.SupplyCommitView == nil { + return fmt.Errorf("supply commit view is required") + } + + if v.SupplyTreeView == nil { + return fmt.Errorf("supply tree view is required") + } + + return nil +} + +// Verifier is responsible for verifying supply commitments. +type Verifier struct { + // cfg is the configuration for the verifier. + cfg VerifierCfg +} + +// NewVerifier creates a new Verifier with the given configuration. +func NewVerifier(cfg VerifierCfg) (Verifier, error) { + var zero Verifier + + if err := cfg.Validate(); err != nil { + return zero, fmt.Errorf("invalid verifier config: %w", err) + } + + return Verifier{ + cfg: cfg, + }, nil +} + +// ensurePrecommitsSpent verifies that all unspent pre-commitment outputs for +// the specified asset group, which could have been spent by the supply +// commitment transaction, were actually spent. 
+func (v *Verifier) ensurePrecommitsSpent(ctx context.Context, + assetSpec asset.Specifier, + commitment supplycommit.RootCommitment) error { + + // Fetch all unspent pre-commitment outputs for the asset group. + allPreCommits, err := v.cfg.SupplyCommitView.UnspentPrecommits( + ctx, assetSpec, + ).Unpack() + if err != nil { + return fmt.Errorf("unable to fetch unspent pre-commitments: %w", + err) + } + + // TODO(ffranr): If commitment.SpentCommitment is none, then we + // should ensure that at least one pre-commitment is spent. + // Before implementing this check, we need to ensure that + // remote issued supply pre-commitments are correctly populated and + // retrieved from the db. + + // Filter pre-commits to only include those that are at block heights + // less than or equal to the commitment's anchor block height. All + // unspent pre-commitments at or before the commitment's anchor block + // height must be spent by the commitment transaction. + commitmentBlock, err := commitment.CommitmentBlock.UnwrapOrErr( + fmt.Errorf("missing commitment block"), + ) + if err != nil { + return err + } + + var preCommits []supplycommit.PreCommitment + for idx := range allPreCommits { + preCommit := allPreCommits[idx] + if preCommit.BlockHeight <= commitmentBlock.Height { + preCommits = append(preCommits, preCommit) + } + } + + // Keep track of all matched pre-commitment outpoints to ensure that + // we spend each one exactly once. + matchedOutPoints := make(map[string]struct{}) + for idxCommitTxIn := range commitment.Txn.TxIn { + commitTxIn := commitment.Txn.TxIn[idxCommitTxIn] + + for idxPreCommit := range preCommits { + preCommit := preCommits[idxPreCommit] + preCommitOutPoint := preCommit.OutPoint() + + if commitTxIn.PreviousOutPoint == preCommitOutPoint { + opStr := preCommitOutPoint.String() + matchedOutPoints[opStr] = struct{}{} + break + } + } + } + + if len(matchedOutPoints) != len(preCommits) { + // Log which pre-commitment outpoints were not matched. 
+ var unmatched []string + for idx := range preCommits { + preCommit := preCommits[idx] + preCommitOutPoint := preCommit.OutPoint() + opStr := preCommitOutPoint.String() + if _, ok := matchedOutPoints[opStr]; !ok { + unmatched = append(unmatched, opStr) + } + } + + log.Errorf("Unmatched pre-commitment outpoints in supply "+ + "commit anchor tx inputs set:\n%s", + strings.Join(unmatched, "\n")) + + return fmt.Errorf("supply commitment does not spend all "+ + "known pre-commitments: expected %d, found %d", + len(preCommits), len(matchedOutPoints)) + } + + return nil +} + +// verifyInitialCommit verifies the first (starting) supply commitment for a +// given asset group. +func (v *Verifier) verifyInitialCommit(ctx context.Context, + assetSpec asset.Specifier, commitment supplycommit.RootCommitment, + leaves supplycommit.SupplyLeaves) error { + + // Assert that the given commitment does not specify a spent outpoint. + // This must be the case for an initial commitment (which is what this + // function verifies). + if commitment.SpentCommitment.IsSome() { + return fmt.Errorf("initial supply commitment must not " + + "specify a spent commitment outpoint") + } + + // Check to ensure that we don't already have a starting + // commitment for the asset group. If we do, then we require a spent + // outpoint to be set on the commitment or that the outpoint is + // the same as the given commitment outpoint. + initCommit, err := v.cfg.SupplyCommitView.FetchStartingCommitment( + ctx, assetSpec, + ) + switch { + case err == nil: + // An initial commitment was found for the asset group. This + // means the given supply commitment is either the initial + // commitment itself, or it is missing a spent outpoint. + if initCommit.CommitPoint() == commitment.CommitPoint() { + // The spent outpoint matches the current commitment + // outpoint. This indicates the commitment has already + // been verified and stored, so we return nil to + // signal verification is complete. 
+ return nil + } + + return fmt.Errorf("found alternative initial commitment for "+ + "asset group (asset=%s)", assetSpec.String()) + + case errors.Is(err, ErrCommitmentNotFound): + // This is the first commitment for the asset group, so we can + // proceed without a spent outpoint. + + default: + return fmt.Errorf("failed to check for starting commitment: "+ + "%w", err) + } + + // Confirm that the given supply commitment transaction spends all known + // unspent pre-commitment outputs. Pre-commitment outputs are outputs + // that were created at the time of asset issuance, and are the + // starting point for the supply commitment chain. Each asset issuance + // anchor transaction can have at most one pre-commitment output. + err = v.ensurePrecommitsSpent(ctx, assetSpec, commitment) + if err != nil { + return fmt.Errorf("unable to verify pre-commitment spends: %w", + err) + } + + // Confirm that the given supply leaves are consistent with the + // given commitment root. + // + // Apply leaves to empty supply trees to generate the initial set of + // supply subtrees. + supplyTrees, err := supplycommit.ApplyTreeUpdates( + supplycommit.SupplyTrees{}, leaves.AllUpdates(), + ) + if err != nil { + return fmt.Errorf("unable to generate supply subtrees from "+ + "supply leaves: %w", err) + } + + // Create a new empty root supply tree and apply the supply subtrees + // generated above. + emptyRootSupplyTree := mssmt.NewCompactedTree(mssmt.NewDefaultStore()) + + rootSupplyTree, err := supplycommit.UpdateRootSupplyTree( + ctx, emptyRootSupplyTree, supplyTrees, + ) + if err != nil { + return fmt.Errorf("unable to formulate root supply tree: %w", + err) + } + + // Ensure that the root of the formulated supply tree matches the + // commitment root. 
+ genRoot, err := rootSupplyTree.Root(ctx) + if err != nil { + return fmt.Errorf("unable to compute root of generated "+ + "supply tree: %w", err) + } + + if genRoot.NodeHash() != commitment.SupplyRoot.NodeHash() { + return fmt.Errorf("generated supply tree root does not match " + + "commitment supply root") + } + + return nil +} + +// verifyIncrementalCommit verifies an incremental supply commitment for a +// given asset group. Verification succeeds only if the previous supply +// commitment is known and verified, and the given supply leaves are +// consistent with the commitment root. +func (v *Verifier) verifyIncrementalCommit(ctx context.Context, + assetSpec asset.Specifier, commitment supplycommit.RootCommitment, + leaves supplycommit.SupplyLeaves) error { + + // Fetch previous supply commitment based on the spent outpoint. This + // step ensures that we have already verified the previous + // commitment, and that it is present in the database. + spentOutPoint, err := commitment.SpentCommitment.UnwrapOrErr( + fmt.Errorf("missing spent supply commitment outpoint"), + ) + if err != nil { + return err + } + + spentCommit, err := + v.cfg.SupplyCommitView.FetchCommitmentByOutpoint( + ctx, assetSpec, spentOutPoint, + ) + if err != nil { + return ErrPrevCommitmentNotFound + } + + // Check that the given commitment spends the previous commitment's + // outpoint that is referenced by the given spent outpoint field. + checkSpendPrevOutPoint := false + for idx := range commitment.Txn.TxIn { + txIn := commitment.Txn.TxIn[idx] + if txIn.PreviousOutPoint == spentOutPoint { + checkSpendPrevOutPoint = true + break + } + } + + if !checkSpendPrevOutPoint { + return fmt.Errorf("supply commitment does not spend " + + "provided previous commitment outpoint") + } + + // Verify that every unspent pre-commitment output eligible by block + // height is actually spent by the supply commitment transaction. 
+ err = v.ensurePrecommitsSpent(ctx, assetSpec, commitment) + if err != nil { + return fmt.Errorf("unable to verify pre-commitment spends: %w", + err) + } + + // Get latest supply root tree and subtrees from the local db. Ensure + // that they correspond to the spent supply commitment outpoint. + spentRootTree, spentSubtrees, err := + v.cfg.SupplyTreeView.FetchSupplyTrees( + ctx, assetSpec, + ) + if err != nil { + return fmt.Errorf("unable to fetch spent root supply tree: %w", + err) + } + + storedSpentRoot, err := spentRootTree.Root(ctx) + if err != nil { + return fmt.Errorf("unable to compute root of local spent "+ + "supply tree: %w", err) + } + + if storedSpentRoot.NodeHash() != spentCommit.SupplyRoot.NodeHash() { + return fmt.Errorf("local spent supply tree root does not " + + "match spent commitment supply root") + } + + // Apply new leaves to the spent subtrees to generate the new set of + // supply subtrees. + newSupplyTrees, err := supplycommit.ApplyTreeUpdates( + *spentSubtrees, leaves.AllUpdates(), + ) + if err != nil { + return fmt.Errorf("unable to apply tree updates to spent "+ + "commitment: %w", err) + } + + // Reconstruct the root supply tree by applying the new leaves to + // the previous root supply tree. + expectedSupplyTree, err := supplycommit.UpdateRootSupplyTree( + ctx, spentRootTree, newSupplyTrees, + ) + if err != nil { + return fmt.Errorf("unable to generate expected root supply "+ + "tree: %w", err) + } + + expectedRoot, err := expectedSupplyTree.Root(ctx) + if err != nil { + return fmt.Errorf("unable to compute root of expected supply "+ + "tree: %w", err) + } + + // Ensure that the root of the reconstructed supply tree matches + // the commitment root. + if expectedRoot.NodeHash() != commitment.SupplyRoot.NodeHash() { + return fmt.Errorf("expected supply tree root does not match " + + "commitment supply root") + } + + return nil +} + +// proofVerifierCtx returns a verifier context that can be used to verify +// proofs. 
+func (v *Verifier) proofVerifierCtx(ctx context.Context) proof.VerifierCtx { + headerVerifier := tapgarden.GenHeaderVerifier(ctx, v.cfg.ChainBridge) + merkleVerifier := proof.DefaultMerkleVerifier + groupVerifier := tapgarden.GenGroupVerifier(ctx, v.cfg.GroupFetcher) + groupAnchorVerifier := tapgarden.GenGroupAnchorVerifier( + ctx, v.cfg.GroupFetcher, + ) + + return proof.VerifierCtx{ + HeaderVerifier: headerVerifier, + MerkleVerifier: merkleVerifier, + GroupVerifier: groupVerifier, + GroupAnchorVerifier: groupAnchorVerifier, + ChainLookupGen: v.cfg.ChainBridge, + } +} + +// verifyIssuanceLeaf verifies a single issuance leaf entry. +func (v *Verifier) verifyIssuanceLeaf(ctx context.Context, + issuanceEntry supplycommit.NewMintEvent) error { + + var issuanceProof proof.Proof + err := issuanceProof.Decode( + bytes.NewReader(issuanceEntry.IssuanceProof.RawProof), + ) + if err != nil { + return fmt.Errorf("unable to decode issuance proof: %w", err) + } + + vCtx := v.proofVerifierCtx(ctx) + lookup, err := vCtx.ChainLookupGen.GenProofChainLookup( + &issuanceProof, + ) + if err != nil { + return fmt.Errorf("unable to generate proof chain lookup: %w", + err) + } + + _, err = issuanceProof.Verify(ctx, nil, lookup, vCtx) + if err != nil { + return fmt.Errorf("issuance proof failed verification: %w", + err) + } + + // Ensure that the leaf key asset ID matches the asset ID in the + // issuance proof. + leafKeyAssetID := issuanceEntry.LeafKey.LeafAssetID() + proofAssetID := issuanceProof.Asset.Genesis.ID() + + if leafKeyAssetID != proofAssetID { + return fmt.Errorf("issuance leaf key asset id does not match " + + "issuance proof asset id") + } + + return nil +} + +// verifyIgnoreLeaf verifies a single ignore leaf entry.
+func (v *Verifier) verifyIgnoreLeaf(ctx context.Context, + delegationPubKey btcec.PublicKey, + ignoreEntry supplycommit.NewIgnoreEvent) error { + + signedIgnore := ignoreEntry.SignedIgnoreTuple + sigBytes := signedIgnore.Sig.Val.Signature.Serialize() + + digest, err := signedIgnore.IgnoreTuple.Val.Digest() + if err != nil { + return fmt.Errorf("failed to compute ignore tuple digest: %w", + err) + } + + pubKeyByteSlice := delegationPubKey.SerializeCompressed() + var pubKeyBytes [33]byte + copy(pubKeyBytes[:], pubKeyByteSlice) + + sigVerifyResult, err := v.cfg.Lnd.Signer.VerifyMessage( + ctx, digest[:], sigBytes, pubKeyBytes, + lndclient.VerifySchnorr(), + ) + if err != nil { + return fmt.Errorf("error when verifying signed message: %w", + err) + } + + if !sigVerifyResult { + return fmt.Errorf("failed to verify signed ignore tuple " + + "signature") + } + + return nil +} + +// verifyBurnLeaf verifies a single burn leaf entry. +func (v *Verifier) verifyBurnLeaf(ctx context.Context, + burnEntry supplycommit.NewBurnEvent) error { + + burnProof := burnEntry.BurnProof + if burnProof == nil { + return fmt.Errorf("missing burn proof for burn leaf") + } + + vCtx := v.proofVerifierCtx(ctx) + lookup, err := vCtx.ChainLookupGen.GenProofChainLookup( + burnProof, + ) + if err != nil { + return fmt.Errorf("unable to generate proof chain lookup: %w", + err) + } + + _, err = burnProof.Verify(ctx, nil, lookup, vCtx) + if err != nil { + return fmt.Errorf("burn leaf proof failed verification: %w", + err) + } + + // Ensure that the leaf key asset ID matches the asset ID in the burn + // proof. + leafKeyAssetID := burnEntry.BurnLeaf.UniverseKey.LeafAssetID() + proofAssetID := burnProof.Asset.Genesis.ID() + + if leafKeyAssetID != proofAssetID { + return fmt.Errorf("burn leaf key asset id does not match " + + "burn proof asset id") + } + + return nil +} + +// verifySupplyLeaves performs validation of the provided supply leaves. 
+func (v *Verifier) verifySupplyLeaves(ctx context.Context, + delegationPubKey btcec.PublicKey, + leaves supplycommit.SupplyLeaves) error { + + // Ensure that all supply leaf block heights are set. + err := leaves.ValidateBlockHeights() + if err != nil { + return fmt.Errorf("supply leaves validation failed: %w", err) + } + + // Verify issuance leaves, if any are present. + for idx := range leaves.IssuanceLeafEntries { + issuanceEntry := leaves.IssuanceLeafEntries[idx] + + err = v.verifyIssuanceLeaf(ctx, issuanceEntry) + if err != nil { + return fmt.Errorf("issuance leaf failed "+ + "verification: %w", err) + } + } + + // Verify ignore leaves, if any are present. + for idx := range leaves.IgnoreLeafEntries { + ignoreEntry := leaves.IgnoreLeafEntries[idx] + + err = v.verifyIgnoreLeaf(ctx, delegationPubKey, ignoreEntry) + if err != nil { + return fmt.Errorf("ignore leaf failed verification: %w", + err) + } + } + + // Verify burn leaves, if any are present. + for idx := range leaves.BurnLeafEntries { + burnEntry := leaves.BurnLeafEntries[idx] + + err = v.verifyBurnLeaf(ctx, burnEntry) + if err != nil { + return fmt.Errorf("burn leaf failed verification: %w", + err) + } + } + + return nil +} + +// fetchDelegationKey fetches the delegation key for the given asset specifier. +func (v *Verifier) fetchDelegationKey(ctx context.Context, + assetSpec asset.Specifier) (btcec.PublicKey, error) { + + var zero btcec.PublicKey + + metaReveal, err := supplycommit.FetchLatestAssetMetadata( + ctx, v.cfg.AssetLookup, assetSpec, + ) + if err != nil { + return zero, fmt.Errorf("unable to fetch asset "+ + "metadata: %w", err) + } + + delegationKey, err := metaReveal.DelegationKey.UnwrapOrErr( + fmt.Errorf("missing delegation key in asset metadata"), + ) + if err != nil { + return zero, err + } + + return delegationKey, nil +} + +// VerifyCommit verifies a supply commitment for a given asset group. 
+// Verification succeeds only if all previous supply commitment dependencies +// are known and verified. The dependency chain must be traceable back to the +// asset issuance anchoring transaction and its pre-commitment output(s). +func (v *Verifier) VerifyCommit(ctx context.Context, + assetSpec asset.Specifier, commitment supplycommit.RootCommitment, + leaves supplycommit.SupplyLeaves) error { + + // TODO(ffranr): Consider: should we require some leaves to be present? + // Or for forward compatibility, allow no leaves? + + // Perform static on-chain verification of the supply commitment's + // anchoring block header. This provides a basic proof-of-work guarantee + // that gates further verification steps. + headerVerifier := tapgarden.GenHeaderVerifier(ctx, v.cfg.ChainBridge) + err := commitment.VerifyChainAnchor( + proof.DefaultMerkleVerifier, headerVerifier, + ) + if err != nil { + return fmt.Errorf("unable to verify supply commitment: %w", err) + } + + // Attempt to fetch the supply commitment by its outpoint, to + // ensure that it is not already present in the database. + _, err = v.cfg.SupplyCommitView.FetchCommitmentByOutpoint( + ctx, assetSpec, commitment.CommitPoint(), + ) + switch { + case err == nil: + // Found commitment, assume already verified and stored. + return nil + + case errors.Is(err, ErrCommitmentNotFound): + // Do nothing, continue to verification of given commitment. + + default: + return fmt.Errorf("failed to check for existing supply "+ + "commitment with given outpoint: %w", err) + } + + delegationKey, err := v.fetchDelegationKey(ctx, assetSpec) + if err != nil { + return fmt.Errorf("unable to fetch delegation key: %w", err) + } + + // Perform validation of the provided supply leaves. 
+ err = v.verifySupplyLeaves(ctx, delegationKey, leaves) + if err != nil { + return fmt.Errorf("unable to verify supply leaves: %w", err) + } + + // If the commitment does not specify a spent outpoint, then we dispatch + // to the initial commitment verification routine. + if commitment.SpentCommitment.IsNone() { + return v.verifyInitialCommit(ctx, assetSpec, commitment, leaves) + } + + // Otherwise, we dispatch to the incremental commitment verification + // routine. + return v.verifyIncrementalCommit(ctx, assetSpec, commitment, leaves) +}