
Commit ee8fc89
loadtest: add mintTestV2
We add a new mintV2 test that mints normal assets of a configured supply into a fixed number of groups. This is an enhanced and more lightweight version of the previous mint test, as it uses fewer assertions and RPC calls.
1 parent 5ca94a6 commit ee8fc89
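For context, mintTestV2 only reads a handful of knobs from the loadtest Config. Below is a minimal, purely illustrative sketch: only the field names (TotalNumGroups, BatchSize, MintSupplyMin, MintSupplyMax) are confirmed by the diff further down; the stand-in struct, field types, and values are assumptions made for illustration.

	// Stand-in for the real loadtest Config, which lives elsewhere in the
	// itest/loadtest package and carries more fields than shown here.
	type Config struct {
		TotalNumGroups int // target number of distinct asset groups
		BatchSize      int // mint requests per finalized batch
		MintSupplyMin  int // lower bound for a random mint amount
		MintSupplyMax  int // upper bound for a random mint amount
	}

With, say, TotalNumGroups set to 10, a first run mints ten new groups and returns; later runs top up any missing groups, then pick one existing group at random and mint BatchSize more assets into it.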

2 files changed (+204 -0)

itest/loadtest/load_test.go

Lines changed: 4 additions & 0 deletions

@@ -39,6 +39,10 @@ var loadTestCases = []testCase{
 		name: "mint",
 		fn:   mintTest,
 	},
+	{
+		name: "mintV2",
+		fn:   mintTestV2,
+	},
 	{
 		name: "send",
 		fn:   sendTest,
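Side note: the testCase struct itself is not touched by this diff. From the entries above and the signature of mintTestV2 added below, its shape is presumably along these lines (an inference, not code from the commit):

	type testCase struct {
		name string
		fn   func(t *testing.T, ctx context.Context, cfg *Config)
	}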

itest/loadtest/mint_batch_test.go

Lines changed: 200 additions & 0 deletions

@@ -9,12 +9,15 @@ import (
 	"math/rand"
 	"strings"
 	"testing"
+	"time"
 
+	"github.com/btcsuite/btcd/rpcclient"
 	"github.com/lightninglabs/taproot-assets/fn"
 	"github.com/lightninglabs/taproot-assets/itest"
 	"github.com/lightninglabs/taproot-assets/taprpc"
 	"github.com/lightninglabs/taproot-assets/taprpc/mintrpc"
 	unirpc "github.com/lightninglabs/taproot-assets/taprpc/universerpc"
+	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/stretchr/testify/require"
 )

@@ -166,3 +169,200 @@ func mintTest(t *testing.T, ctx context.Context, cfg *Config) {
 
 	itest.SyncUniverses(ctx, t, bob, alice, aliceHost, cfg.TestTimeout)
 }
+
+// mintTestV2 checks that we can mint a batch of assets. It is a more
+// performant version of the existing mintTest, as it uses fewer assertions
+// and RPC calls.
+func mintTestV2(t *testing.T, ctx context.Context, cfg *Config) {
+	// Start by initializing all our client connections.
+	alice, bob, bitcoinClient := initClients(t, ctx, cfg)
+
+	// We query the assets of each node once in this step. Every function
+	// that needs to take a node's assets into account will be passed these
+	// values instead of calling the RPC again. This is done to minimize
+	// the collateral RPC impact of the loadtest.
+	resAlice, err := alice.ListAssets(ctx, &taprpc.ListAssetRequest{})
+	require.NoError(t, err)
+
+	resBob, err := bob.ListAssets(ctx, &taprpc.ListAssetRequest{})
+	require.NoError(t, err)
+
+	assetsAlice := resAlice.Assets
+	assetsBob := resBob.Assets
+
+	totalAssets := make([]*taprpc.Asset, len(assetsAlice)+len(assetsBob))
+	copy(totalAssets, assetsAlice)
+	copy(totalAssets[len(assetsAlice):], assetsBob)
+
+	// Alice serves as the minter.
+	//
+	// TODO(george): Currently we use only 1 fixed minter, but this could
+	// change in the future to emulate a more realistic environment where
+	// multiple nodes continuously mint assets into their own groups.
+	minter := alice
+
+	// First we make sure group initialization is completed: if fewer
+	// groups exist than the configured total, we mint the missing ones.
+	existingGroups := getTotalAssetGroups(totalAssets)
+	groupKeys := make(map[string][]byte, 0)
+
+	for _, v := range existingGroups {
+		tweakedKey, err := hex.DecodeString(v)
+		require.NoError(t, err)
+
+		groupKeys[v] = tweakedKey
+	}
+
+	var remainingGroups int
+	if cfg.TotalNumGroups > len(existingGroups) {
+		remainingGroups = cfg.TotalNumGroups - len(existingGroups)
+	}
+
+	t.Logf("Existing groups=%v, minting %v new groups",
+		len(existingGroups), remainingGroups)
+	for range remainingGroups {
+		mintNewGroup(t, ctx, bitcoinClient, minter, cfg)
+	}
+
+	// If there aren't any existing groups yet, we skip the rest of the
+	// steps; we will mint into the freshly created groups in a later run.
+	if len(existingGroups) == 0 {
+		return
+	}
+
+	groupIndex := rand.Intn(len(existingGroups))
+	groupKey := groupKeys[existingGroups[groupIndex]]
+
+	mintIntoGroup(t, ctx, bitcoinClient, minter, groupKey, cfg)
+}
+
+// mintNewGroup mints an asset that creates a new group.
+func mintNewGroup(t *testing.T, ctx context.Context, miner *rpcclient.Client,
+	minter *rpcClient, cfg *Config) []*taprpc.Asset {
+
+	mintAmt := rand.Uint64() % uint64(cfg.MintSupplyMax)
+	if mintAmt < uint64(cfg.MintSupplyMin) {
+		mintAmt = uint64(cfg.MintSupplyMin)
+	}
+
+	assetRequests := []*mintrpc.MintAssetRequest{{
+		Asset: &mintrpc.MintAsset{
+			AssetType: taprpc.AssetType_NORMAL,
+			Name: fmt.Sprintf(
+				"tapcoin-%d", time.Now().UnixNano(),
+			),
+			AssetMeta: &taprpc.AssetMeta{
+				Data: []byte("{}"),
+				Type: taprpc.AssetMetaType_META_TYPE_JSON,
+			},
+			Amount: mintAmt,
+			NewGroupedAsset: true,
+			DecimalDisplay: 4,
+		},
+	}}
+
+	return finishMint(t, ctx, miner, minter, assetRequests)
+}
+
+// mintIntoGroup mints as many assets as the batch size and adds them to the
+// existing group identified by the provided tweaked group key.
+func mintIntoGroup(t *testing.T, ctx context.Context, miner *rpcclient.Client,
+	minter *rpcClient, tweakedKey []byte, cfg *Config) []*taprpc.Asset {
+
+	mintAmt := rand.Uint64() % uint64(cfg.MintSupplyMax)
+	if mintAmt < uint64(cfg.MintSupplyMin) {
+		mintAmt = uint64(cfg.MintSupplyMin)
+	}
+
+	var assetRequests []*mintrpc.MintAssetRequest
+
+	t.Logf("Minting %v assets into group %x", cfg.BatchSize, tweakedKey)
+
+	for range cfg.BatchSize {
+		ts := time.Now().UnixNano()
+
+		// nolint:lll
+		req := &mintrpc.MintAssetRequest{
+			Asset: &mintrpc.MintAsset{
+				AssetType: taprpc.AssetType_NORMAL,
+				Name: fmt.Sprintf("tapcoin-%d", ts),
+				AssetMeta: &taprpc.AssetMeta{
+					Data: []byte("{}"),
+					Type: taprpc.AssetMetaType_META_TYPE_JSON,
+				},
+				Amount: mintAmt,
+				GroupedAsset: true,
+				GroupKey: tweakedKey,
+				DecimalDisplay: 4,
+			},
+		}
+
+		assetRequests = append(assetRequests, req)
+	}
+
+	return finishMint(t, ctx, miner, minter, assetRequests)
+}
+
+// finishMint accepts a list of asset requests and performs the necessary RPC
+// calls to create and finalize a minting batch.
+func finishMint(t *testing.T, ctx context.Context, miner *rpcclient.Client,
+	minter *rpcClient,
+	assetRequests []*mintrpc.MintAssetRequest) []*taprpc.Asset {
+
+	ctxc, streamCancel := context.WithCancel(ctx)
+	stream, err := minter.SubscribeMintEvents(
+		ctxc, &mintrpc.SubscribeMintEventsRequest{},
+	)
+	require.NoError(t, err)
+	sub := &itest.EventSubscription[*mintrpc.MintEvent]{
+		ClientEventStream: stream,
+		Cancel: streamCancel,
+	}
+
+	itest.BuildMintingBatch(t, minter, assetRequests)
+
+	ctxb := context.Background()
+	ctxt, cancel := context.WithTimeout(ctxb, wait.DefaultTimeout)
+	defer cancel()
+
+	finalizeReq := &mintrpc.FinalizeBatchRequest{}
+
+	// Instruct the daemon to finalize the batch.
+	batchResp, err := minter.FinalizeBatch(ctxt, finalizeReq)
+	require.NoError(t, err)
+	require.NotEmpty(t, batchResp.Batch)
+	require.Len(t, batchResp.Batch.Assets, len(assetRequests))
+	require.Equal(
+		t, mintrpc.BatchState_BATCH_STATE_BROADCAST,
+		batchResp.Batch.State,
+	)
+
+	itest.WaitForBatchState(
+		t, ctxt, minter, wait.DefaultTimeout,
+		batchResp.Batch.BatchKey,
+		mintrpc.BatchState_BATCH_STATE_BROADCAST,
+	)
+	hashes, err := itest.WaitForNTxsInMempool(
+		miner, 1, wait.DefaultTimeout,
+	)
+	require.NoError(t, err)
+	require.GreaterOrEqual(t, len(hashes), 1)
+
+	return itest.ConfirmBatch(
+		t, miner, minter, assetRequests, sub, *hashes[0],
+		batchResp.Batch.BatchKey,
+	)
+}
+
+// getTotalAssetGroups returns the set of distinct asset group keys found in
+// the passed array of assets, encoded as hex strings.
+func getTotalAssetGroups(assets []*taprpc.Asset) []string {
+	groups := fn.NewSet[string]()
+
+	for _, v := range assets {
+		groupKeyStr := fmt.Sprintf("%x", v.AssetGroup.TweakedGroupKey)
+		groups.Add(groupKeyStr)
+	}
+
+	return groups.ToSlice()
+}
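One detail worth calling out: both mintNewGroup and mintIntoGroup pick their amount by drawing a random value below MintSupplyMax and then clamping it up to MintSupplyMin, so every mint lands in [MintSupplyMin, MintSupplyMax) and any draw below the minimum collapses onto it. A standalone sketch of that selection follows; it is not part of the commit, and the bounds are arbitrary example values.

	package main

	import (
		"fmt"
		"math/rand"
	)

	func main() {
		// Example bounds only; the real values come from the loadtest Config.
		const (
			mintSupplyMin uint64 = 1_000
			mintSupplyMax uint64 = 100_000
		)

		// Same selection logic as the commit: draw from [0, max), then
		// clamp up to min.
		mintAmt := rand.Uint64() % mintSupplyMax
		if mintAmt < mintSupplyMin {
			mintAmt = mintSupplyMin
		}

		fmt.Printf("would mint %d units\n", mintAmt)
	}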
