Commit e1da4bb

ZZiigguurraatt authored and guggero committed
itest: add itest with multiple V2 address round trips
This is the integration test that initially discovered the issue with split outputs that become passive assets in a subsequent transfer. We add it here to make sure the bug is properly fixed.
1 parent 4336b79 commit e1da4bb

File tree

2 files changed, +262 -0 lines changed

itest/addrs_v2_test.go

Lines changed: 258 additions & 0 deletions
@@ -396,6 +396,264 @@ func testAddressV2WithGroupKey(t *harnessTest) {
	)
}

// testAddressV2WithGroupKeyMultipleRoundTrips tests that we can send assets
// back and forth multiple times using V2 addresses.
func testAddressV2WithGroupKeyMultipleRoundTrips(t *harnessTest) {
	// We begin by minting a new asset group with a group key.
	firstTrancheReq := CopyRequest(issuableAssets[0])

	firstTrancheReq.Asset.Amount = 212e5

	firstTranche := MintAssetsConfirmBatch(
		t.t, t.lndHarness.Miner().Client, t.tapd,
		[]*mintrpc.MintAssetRequest{firstTrancheReq},
	)
	firstAsset := firstTranche[0]
	firstAssetID := firstAsset.AssetGenesis.AssetId
	groupKey := firstAsset.AssetGroup.TweakedGroupKey

	// And then we mint a second tranche of the same asset group.
	secondTrancheReq := CopyRequest(firstTrancheReq)
	secondTrancheReq.Asset.Name = "itestbuxx-money-printer-brrr-tranche-2"
	secondTrancheReq.Asset.GroupedAsset = true
	secondTrancheReq.Asset.NewGroupedAsset = false
	secondTrancheReq.Asset.GroupKey = groupKey

	secondTrancheReq.Asset.Amount = 202e5

	secondTranche := MintAssetsConfirmBatch(
		t.t, t.lndHarness.Miner().Client, t.tapd,
		[]*mintrpc.MintAssetRequest{secondTrancheReq},
	)
	secondAsset := secondTranche[0]
	secondAssetID := secondAsset.AssetGenesis.AssetId

	// And then we mint a third tranche of the same asset group.
	thirdTrancheReq := CopyRequest(firstTrancheReq)
	thirdTrancheReq.Asset.Name = "itestbuxx-money-printer-brrr-tranche-3"
	thirdTrancheReq.Asset.GroupedAsset = true
	thirdTrancheReq.Asset.NewGroupedAsset = false
	thirdTrancheReq.Asset.GroupKey = groupKey

	thirdTrancheReq.Asset.Amount = 182e5

	thirdTranche := MintAssetsConfirmBatch(
		t.t, t.lndHarness.Miner().Client, t.tapd,
		[]*mintrpc.MintAssetRequest{thirdTrancheReq},
	)
	thirdAsset := thirdTranche[0]
	thirdAssetID := thirdAsset.AssetGenesis.AssetId

	ctxb := context.Background()
	ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout)
	defer cancel()

	totalAmount := firstAsset.Amount + secondAsset.Amount +
		thirdAsset.Amount
	t.Logf("Minted %d units for group %x", totalAmount, groupKey)

	// Now we can create an address with the group key.
	// We'll make a second node now that'll be the receiver of all the
	// assets made above.
	bobLnd := t.lndHarness.NewNodeWithCoins("Bob", nil)
	bobTapd := setupTapdHarness(t.t, t, bobLnd, t.universeServer)
	defer func() {
		require.NoError(t.t, bobTapd.stop(!*noDelete))
	}()

	groupAddrBob, _ := NewAddrWithEventStream(
		t.t, bobTapd, &taprpc.NewAddrRequest{
			AddressVersion: taprpc.AddrVersion_ADDR_VERSION_V2,
			GroupKey:       groupKey,
		},
	)

	t.Logf("Got group addr: %v", toJSON(t.t, groupAddrBob))

	currentTransferIdx := -1
	numTransfers := 0

	bobCurrentTransferIdx := -1
	bobNumTransfers := 0

	// We send the first tranche from alice to bob.
	currentTransferIdx += 1
	numTransfers += 1

	sendResp, err := t.tapd.SendAsset(ctxt, &taprpc.SendAssetRequest{
		AddressesWithAmounts: []*taprpc.AddressWithAmount{
			{
				TapAddr: groupAddrBob.Encoded,
				Amount:  firstAsset.Amount,
			},
		},
	})
	require.NoError(t.t, err)
	t.Logf("Sent asset to group addr: %v", toJSON(t.t, sendResp))
	AssertAssetOutboundTransferWithOutputs(
		t.t, t.lndHarness.Miner().Client, t.tapd,
		sendResp.Transfer, [][]byte{firstAssetID},
		[]uint64{0, firstAsset.Amount}, currentTransferIdx,
		numTransfers, 2, true,
	)

	AssertAddrEventByStatus(t.t, bobTapd, statusCompleted, numTransfers)

	// Then we send the second tranche from alice to bob.
	currentTransferIdx += 1
	numTransfers += 1

	sendResp, err = t.tapd.SendAsset(ctxt, &taprpc.SendAssetRequest{
		AddressesWithAmounts: []*taprpc.AddressWithAmount{
			{
				TapAddr: groupAddrBob.Encoded,
				Amount:  secondAsset.Amount,
			},
		},
	})
	require.NoError(t.t, err)
	t.Logf("Sent asset to group addr: %v", toJSON(t.t, sendResp))
	AssertAssetOutboundTransferWithOutputs(
		t.t, t.lndHarness.Miner().Client, t.tapd,
		sendResp.Transfer, [][]byte{secondAssetID},
		[]uint64{0, secondAsset.Amount}, currentTransferIdx,
		numTransfers, 2, true,
	)

	AssertAddrEventByStatus(t.t, bobTapd, statusCompleted, numTransfers)

	// And now the third tranche from alice to bob.
	currentTransferIdx += 1
	numTransfers += 1

	sendResp, err = t.tapd.SendAsset(ctxt, &taprpc.SendAssetRequest{
		AddressesWithAmounts: []*taprpc.AddressWithAmount{
			{
				TapAddr: groupAddrBob.Encoded,
				Amount:  thirdAsset.Amount,
			},
		},
	})
	require.NoError(t.t, err)
	t.Logf("Sent asset to group addr: %v", toJSON(t.t, sendResp))
	AssertAssetOutboundTransferWithOutputs(
		t.t, t.lndHarness.Miner().Client, t.tapd,
		sendResp.Transfer, [][]byte{thirdAssetID},
		[]uint64{0, thirdAsset.Amount}, currentTransferIdx,
		numTransfers, 2, true,
	)

	AssertAddrEventByStatus(t.t, bobTapd, statusCompleted, numTransfers)

	// We now make sure we can spend those assets again by sending
	// them from bob back to Alice, using an address with an amount.
	// This time, we'll send back all 3 tranches at once.
	groupAddrAlice, _ := NewAddrWithEventStream(
		t.t, t.tapd, &taprpc.NewAddrRequest{
			Amt:            totalAmount,
			AddressVersion: addrV2,
			GroupKey:       groupKey,
		},
	)

	t.Logf("Got group addr: %v", toJSON(t.t, groupAddrAlice))

	bobCurrentTransferIdx += 1
	bobNumTransfers += 1

	sendResp, err = bobTapd.SendAsset(ctxt, &taprpc.SendAssetRequest{
		AddressesWithAmounts: []*taprpc.AddressWithAmount{
			{
				TapAddr: groupAddrAlice.Encoded,
			},
		},
	})
	require.NoError(t.t, err)

	t.Logf("Sent asset to group addr: %v", toJSON(t.t, sendResp))

	MineBlocks(t.t, t.lndHarness.Miner().Client, 1, 1)

	AssertAddrEventByStatus(t.t, t.tapd, statusCompleted, bobNumTransfers)

	// Now we send back from alice to bob again, but this time all at once
	// instead of individually.
	for i := 0; i < 4; i++ {
		currentTransferIdx += 1
		numTransfers += 1

		sendResp, err = t.tapd.SendAsset(ctxt, &taprpc.SendAssetRequest{
			AddressesWithAmounts: []*taprpc.AddressWithAmount{
				{
					TapAddr: groupAddrBob.Encoded,
					Amount:  totalAmount,
				},
			},
		})
		require.NoError(t.t, err)
		t.Logf("Sent asset to group addr: %v", toJSON(t.t, sendResp))
		MineBlocks(t.t, t.lndHarness.Miner().Client, 1, 1)
		AssertAddrEventByStatus(
			t.t, bobTapd, statusCompleted, numTransfers,
		)

		// We again return all three tranches back to Alice.
		bobCurrentTransferIdx += 1
		bobNumTransfers += 1

		addrReq := []*taprpc.AddressWithAmount{
			{
				TapAddr: groupAddrAlice.Encoded,
			},
		}
		sendResp, err = bobTapd.SendAsset(
			ctxt, &taprpc.SendAssetRequest{
				AddressesWithAmounts: addrReq,
			},
		)
		require.NoError(t.t, err)
		t.Logf("Sent asset to group addr: %v", toJSON(t.t, sendResp))
		MineBlocks(t.t, t.lndHarness.Miner().Client, 1, 1)
		AssertAddrEventByStatus(
			t.t, t.tapd, statusCompleted, bobNumTransfers,
		)
	}

	// We now send each tranche back to bob individually, to make sure
	// each one is spendable on its own.
	amounts := []uint64{
		firstAsset.Amount, secondAsset.Amount, thirdAsset.Amount,
	}
	assetIDs := [][]byte{
		firstAssetID, secondAssetID, thirdAssetID,
	}
	for idx, amount := range amounts {
		currentTransferIdx += 1
		numTransfers += 1

		sendResp, err = t.tapd.SendAsset(ctxt, &taprpc.SendAssetRequest{
			AddressesWithAmounts: []*taprpc.AddressWithAmount{
				{
					TapAddr: groupAddrBob.Encoded,
					Amount:  amount,
				},
			},
		})
		require.NoError(t.t, err)
		t.Logf("Sent asset to group addr: %v", toJSON(t.t, sendResp))
		AssertAssetOutboundTransferWithOutputs(
			t.t, t.lndHarness.Miner().Client, t.tapd,
			sendResp.Transfer, [][]byte{assetIDs[idx]},
			[]uint64{0, amount}, currentTransferIdx, numTransfers,
			2, true,
		)

		AssertAddrEventByStatus(
			t.t, bobTapd, statusCompleted, numTransfers,
		)
	}
}

// testAddressV2WithGroupKeyRestart tests that we can re-try and properly
// continue the address v2 send process in various scenarios.
func testAddressV2WithGroupKeyRestart(t *harnessTest) {

itest/test_list_on_test.go

Lines changed: 4 additions & 0 deletions
@@ -379,6 +379,10 @@ var allTestCases = []*testCase{
		name: "address v2 with group key",
		test: testAddressV2WithGroupKey,
	},
	{
		name: "address v2 with group key multiple round trips",
		test: testAddressV2WithGroupKeyMultipleRoundTrips,
	},
	{
		name: "address v2 with group key restart",
		test: testAddressV2WithGroupKeyRestart,
