diff --git a/autopilot/prefattach_test.go b/autopilot/prefattach_test.go index 70f7e2b68b9..67cfc11f0ab 100644 --- a/autopilot/prefattach_test.go +++ b/autopilot/prefattach_test.go @@ -417,19 +417,22 @@ func (d *testDBGraph) addRandChannel(node1, node2 *btcec.PublicKey, case errors.Is(err, graphdb.ErrGraphNodeNotFound): fallthrough case errors.Is(err, graphdb.ErrGraphNotFound): - graphNode := &models.Node{ - HaveNodeAnnouncement: true, - Addresses: []net.Addr{&net.TCPAddr{ - IP: bytes.Repeat( - []byte("a"), 16, - ), - }}, - Features: lnwire.NewFeatureVector( - nil, lnwire.Features, - ), - AuthSigBytes: testSig.Serialize(), - } - graphNode.AddPubKey(pub) + var pubKey [33]byte + copy(pubKey[:], pub.SerializeCompressed()) + //nolint:ll + graphNode := models.NewV1Node( + pubKey, &models.NodeV1Fields{ + Addresses: []net.Addr{&net.TCPAddr{ + IP: bytes.Repeat( + []byte("a"), 16, + ), + }}, + Features: lnwire.NewFeatureVector( + nil, lnwire.Features, + ).RawFeatureVector, + AuthSigBytes: testSig.Serialize(), + }, + ) err := d.db.AddNode( context.Background(), graphNode, ) @@ -447,8 +450,9 @@ func (d *testDBGraph) addRandChannel(node1, node2 *btcec.PublicKey, if err != nil { return nil, err } - dbNode := &models.Node{ - HaveNodeAnnouncement: true, + var pubKey [33]byte + copy(pubKey[:], nodeKey.SerializeCompressed()) + dbNode := models.NewV1Node(pubKey, &models.NodeV1Fields{ Addresses: []net.Addr{ &net.TCPAddr{ IP: bytes.Repeat([]byte("a"), 16), @@ -456,10 +460,9 @@ func (d *testDBGraph) addRandChannel(node1, node2 *btcec.PublicKey, }, Features: lnwire.NewFeatureVector( nil, lnwire.Features, - ), + ).RawFeatureVector, AuthSigBytes: testSig.Serialize(), - } - dbNode.AddPubKey(nodeKey) + }) if err := d.db.AddNode( context.Background(), dbNode, ); err != nil { @@ -494,7 +497,11 @@ func (d *testDBGraph) addRandChannel(node1, node2 *btcec.PublicKey, Capacity: capacity, Features: lnwire.EmptyFeatureVector(), } - edge.AddNodeKeys(lnNode1, lnNode2, lnNode1, lnNode2) + copy(edge.NodeKey1Bytes[:], lnNode1.SerializeCompressed()) + copy(edge.NodeKey2Bytes[:], lnNode2.SerializeCompressed()) + copy(edge.BitcoinKey1Bytes[:], lnNode1.SerializeCompressed()) + copy(edge.BitcoinKey2Bytes[:], lnNode2.SerializeCompressed()) + if err := d.db.AddChannelEdge(ctx, edge); err != nil { return nil, nil, err } @@ -548,8 +555,9 @@ func (d *testDBGraph) addRandNode() (*btcec.PublicKey, error) { if err != nil { return nil, err } - dbNode := &models.Node{ - HaveNodeAnnouncement: true, + var pubKey [33]byte + copy(pubKey[:], nodeKey.SerializeCompressed()) + dbNode := models.NewV1Node(pubKey, &models.NodeV1Fields{ Addresses: []net.Addr{ &net.TCPAddr{ IP: bytes.Repeat([]byte("a"), 16), @@ -557,10 +565,9 @@ func (d *testDBGraph) addRandNode() (*btcec.PublicKey, error) { }, Features: lnwire.NewFeatureVector( nil, lnwire.Features, - ), + ).RawFeatureVector, AuthSigBytes: testSig.Serialize(), - } - dbNode.AddPubKey(nodeKey) + }) err = d.db.AddNode(context.Background(), dbNode) if err != nil { return nil, err diff --git a/channeldb/db_test.go b/channeldb/db_test.go index ec2394a1c35..85785e9f6c6 100644 --- a/channeldb/db_test.go +++ b/channeldb/db_test.go @@ -811,16 +811,16 @@ func createNode(priv *btcec.PrivateKey) *models.Node { updateTime := rand.Int63() pub := priv.PubKey().SerializeCompressed() - n := &models.Node{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSig.Serialize(), - LastUpdate: time.Unix(updateTime, 0), - Color: color.RGBA{1, 2, 3, 0}, - Alias: "kek" + string(pub), - Features: testFeatures, - Addresses: testAddrs, - 
} - copy(n.PubKeyBytes[:], priv.PubKey().SerializeCompressed()) + var pubKey [33]byte + copy(pubKey[:], pub) + n := models.NewV1Node(pubKey, &models.NodeV1Fields{ + AuthSigBytes: testSig.Serialize(), + LastUpdate: time.Unix(updateTime, 0), + Color: color.RGBA{1, 2, 3, 0}, + Alias: "kek" + string(pub), + Features: testFeatures.RawFeatureVector, + Addresses: testAddrs, + }) return n } diff --git a/discovery/chan_series.go b/discovery/chan_series.go index 8ecb3a4c883..050b82b1082 100644 --- a/discovery/chan_series.go +++ b/discovery/chan_series.go @@ -295,7 +295,7 @@ func (c *ChanSeries) FetchChanAnns(chain chainhash.Hash, // If this edge has a validated node announcement, that // we haven't yet sent, then we'll send that as well. nodePub := channel.Node2.PubKeyBytes - hasNodeAnn := channel.Node2.HaveNodeAnnouncement + hasNodeAnn := channel.Node2.HaveAnnouncement() if _, ok := nodePubsSent[nodePub]; !ok && hasNodeAnn { nodeAnn, err := channel.Node2.NodeAnnouncement( true, @@ -321,7 +321,7 @@ func (c *ChanSeries) FetchChanAnns(chain chainhash.Hash, // If this edge has a validated node announcement, that // we haven't yet sent, then we'll send that as well. nodePub := channel.Node1.PubKeyBytes - hasNodeAnn := channel.Node1.HaveNodeAnnouncement + hasNodeAnn := channel.Node1.HaveAnnouncement() if _, ok := nodePubsSent[nodePub]; !ok && hasNodeAnn { nodeAnn, err := channel.Node1.NodeAnnouncement( true, diff --git a/graph/builder.go b/graph/builder.go index 59e9b19d5da..91040dd088b 100644 --- a/graph/builder.go +++ b/graph/builder.go @@ -55,7 +55,7 @@ type Config struct { // Graph is the channel graph that the ChannelRouter will use to gather // metrics from and also to carry out path finding queries. - Graph DB + Graph *graphdb.ChannelGraph // Chain is the router's source to the most up-to-date blockchain data. // All incoming advertised channels will be checked against the chain diff --git a/graph/builder_test.go b/graph/builder_test.go index 0461c4fdc6a..d059a86e0ab 100644 --- a/graph/builder_test.go +++ b/graph/builder_test.go @@ -97,16 +97,18 @@ func TestIgnoreNodeAnnouncement(t *testing.T) { ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath) pub := priv1.PubKey() - node := &models.Node{ - HaveNodeAnnouncement: true, - LastUpdate: time.Unix(123, 0), - Addresses: testAddrs, - Color: color.RGBA{1, 2, 3, 0}, - Alias: "node11", - AuthSigBytes: testSig.Serialize(), - Features: testFeatures, - } - copy(node.PubKeyBytes[:], pub.SerializeCompressed()) + var pubBytes route.Vertex + copy(pubBytes[:], pub.SerializeCompressed()) + node := models.NewV1Node( + pubBytes, &models.NodeV1Fields{ + Addresses: testAddrs, + AuthSigBytes: testSig.Serialize(), + Features: testFeatures.RawFeatureVector, + LastUpdate: time.Unix(123, 0), + Color: color.RGBA{1, 2, 3, 0}, + Alias: "node11", + }, + ) err := ctx.builder.AddNode(t.Context(), node) if !IsError(err, ErrIgnored) { @@ -1084,16 +1086,16 @@ func TestIsStaleNode(t *testing.T) { // With the node stub in the database, we'll add the fully node // announcement to the database. 
- n1 := &models.Node{ - HaveNodeAnnouncement: true, - LastUpdate: updateTimeStamp, - Addresses: testAddrs, - Color: color.RGBA{1, 2, 3, 0}, - Alias: "node11", - AuthSigBytes: testSig.Serialize(), - Features: testFeatures, - } - copy(n1.PubKeyBytes[:], priv1.PubKey().SerializeCompressed()) + var pubKey [33]byte + copy(pubKey[:], priv1.PubKey().SerializeCompressed()) + n1 := models.NewV1Node(pubKey, &models.NodeV1Fields{ + LastUpdate: updateTimeStamp, + Addresses: testAddrs, + Color: color.RGBA{1, 2, 3, 0}, + Alias: "node11", + AuthSigBytes: testSig.Serialize(), + Features: testFeatures.RawFeatureVector, + }) if err := ctx.builder.AddNode(t.Context(), n1); err != nil { t.Fatalf("could not add node: %v", err) } @@ -1401,15 +1403,15 @@ func parseTestGraph(t *testing.T, useCache bool, path string) ( return nil, err } - dbNode := &models.Node{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSig.Serialize(), - LastUpdate: testTime, - Addresses: testAddrs, - Alias: node.Alias, - Features: testFeatures, - } - copy(dbNode.PubKeyBytes[:], pubBytes) + var pubKey [33]byte + copy(pubKey[:], pubBytes) + dbNode := models.NewV1Node(pubKey, &models.NodeV1Fields{ + AuthSigBytes: testSig.Serialize(), + LastUpdate: testTime, + Addresses: testAddrs, + Alias: node.Alias, + Features: testFeatures.RawFeatureVector, + }) // We require all aliases within the graph to be unique for our // tests. @@ -1787,16 +1789,15 @@ func createTestGraphFromChannels(t *testing.T, useCache bool, features = lnwire.EmptyFeatureVector() } - dbNode := &models.Node{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSig.Serialize(), - LastUpdate: testTime, - Addresses: testAddrs, - Alias: alias, - Features: features, - } - - copy(dbNode.PubKeyBytes[:], pubKey.SerializeCompressed()) + var pubKeyBytes [33]byte + copy(pubKeyBytes[:], pubKey.SerializeCompressed()) + dbNode := models.NewV1Node(pubKeyBytes, &models.NodeV1Fields{ + AuthSigBytes: testSig.Serialize(), + LastUpdate: testTime, + Addresses: testAddrs, + Alias: alias, + Features: features.RawFeatureVector, + }) privKeyMap[alias] = privKey diff --git a/graph/db/graph.go b/graph/db/graph.go index 08e287238cf..fc1ffa3002d 100644 --- a/graph/db/graph.go +++ b/graph/db/graph.go @@ -24,10 +24,6 @@ import ( var ErrChanGraphShuttingDown = fmt.Errorf("ChannelGraph shutting down") // ChannelGraph is a layer above the graph's CRUD layer. -// -// NOTE: currently, this is purely a pass-through layer directly to the backing -// KVStore. Upcoming commits will move the graph cache out of the KVStore and -// into this layer so that the KVStore is only responsible for CRUD operations. 
type ChannelGraph struct { started atomic.Bool stopped atomic.Bool diff --git a/graph/db/graph_test.go b/graph/db/graph_test.go index ee5cf8dbf1c..dd04f8bdde1 100644 --- a/graph/db/graph_test.go +++ b/graph/db/graph_test.go @@ -70,19 +70,18 @@ var ( ) func createNode(priv *btcec.PrivateKey) *models.Node { - pub := priv.PubKey().SerializeCompressed() - n := &models.Node{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSig.Serialize(), - LastUpdate: nextUpdateTime(), - Color: color.RGBA{1, 2, 3, 0}, - Alias: "kek" + hex.EncodeToString(pub), - Features: testFeatures, - Addresses: testAddrs, - } - copy(n.PubKeyBytes[:], priv.PubKey().SerializeCompressed()) - - return n + var pubKey [33]byte + copy(pubKey[:], priv.PubKey().SerializeCompressed()) + return models.NewV1Node( + pubKey, &models.NodeV1Fields{ + LastUpdate: nextUpdateTime(), + Color: color.RGBA{1, 2, 3, 0}, + Alias: "kek" + hex.EncodeToString(pubKey[:]), + Addresses: testAddrs, + Features: testFeatures.RawFeatureVector, + AuthSigBytes: testSig.Serialize(), + }, + ) } func createTestVertex(t testing.TB) *models.Node { @@ -106,17 +105,18 @@ func TestNodeInsertionAndDeletion(t *testing.T) { timeStamp := int64(1232342) nodeWithAddrs := func(addrs []net.Addr) *models.Node { timeStamp++ - return &models.Node{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSig.Serialize(), - LastUpdate: time.Unix(timeStamp, 0), - Color: color.RGBA{1, 2, 3, 0}, - Alias: "kek", - Features: testFeatures, - Addresses: addrs, - ExtraOpaqueData: []byte{1, 1, 1, 2, 2, 2, 2}, - PubKeyBytes: testPub, - } + + return models.NewV1Node( + testPub, &models.NodeV1Fields{ + AuthSigBytes: testSig.Serialize(), + LastUpdate: time.Unix(timeStamp, 0), + Color: color.RGBA{1, 2, 3, 0}, + Alias: "kek", + Features: testFeatures.RawFeatureVector, + Addresses: addrs, + ExtraOpaqueData: []byte{1, 1, 1, 2, 2, 2, 2}, + }, + ) } // First, insert the node into the graph DB. This should succeed @@ -315,12 +315,7 @@ func TestPartialNode(t *testing.T) { // The two nodes should match exactly! (with default values for // LastUpdate and db set to satisfy compareNodes()) - expectedNode1 := &models.Node{ - HaveNodeAnnouncement: false, - LastUpdate: time.Unix(0, 0), - PubKeyBytes: pubKey1, - Features: lnwire.EmptyFeatureVector(), - } + expectedNode1 := models.NewV1ShellNode(pubKey1) compareNodes(t, expectedNode1, dbNode1) _, exists, err = graph.HasNode(ctx, dbNode2.PubKeyBytes) @@ -329,12 +324,7 @@ func TestPartialNode(t *testing.T) { // The two nodes should match exactly! (with default values for // LastUpdate and db set to satisfy compareNodes()) - expectedNode2 := &models.Node{ - HaveNodeAnnouncement: false, - LastUpdate: time.Unix(0, 0), - PubKeyBytes: pubKey2, - Features: lnwire.EmptyFeatureVector(), - } + expectedNode2 := models.NewV1ShellNode(pubKey2) compareNodes(t, expectedNode2, dbNode2) // Next, delete the node from the graph, this should purge all data @@ -369,7 +359,7 @@ func TestAliasLookup(t *testing.T) { require.NoError(t, err, "unable to generate pubkey") dbAlias, err := graph.LookupAlias(ctx, nodePub) require.NoError(t, err, "unable to find alias") - require.Equal(t, testNode.Alias, dbAlias) + require.Equal(t, testNode.Alias.UnwrapOr(""), dbAlias) // Ensure that looking up a non-existent alias results in an error. 
node := createTestVertex(t) @@ -1604,7 +1594,7 @@ func fillTestGraph(t testing.TB, graph *ChannelGraph, numNodes, node := createTestVertex(t) nodes[i] = node - nodeIndex[node.Alias] = struct{}{} + nodeIndex[node.Alias.UnwrapOr("")] = struct{}{} } // Add each of the nodes into the graph, they should be inserted @@ -1616,7 +1606,7 @@ func fillTestGraph(t testing.TB, graph *ChannelGraph, numNodes, // Iterate over each node as returned by the graph, if all nodes are // reached, then the map created above should be empty. err := graph.ForEachNode(ctx, func(n *models.Node) error { - delete(nodeIndex, n.Alias) + delete(nodeIndex, n.Alias.UnwrapOr("")) return nil }, func() {}) require.NoError(t, err) @@ -2293,7 +2283,7 @@ func TestNodeUpdatesInHorizon(t *testing.T) { require.Len(t, resp, len(queryCase.resp)) for i := 0; i < len(resp); i++ { - compareNodes(t, &queryCase.resp[i], &resp[i]) + compareNodes(t, &queryCase.resp[i], resp[i]) } } } @@ -2491,7 +2481,7 @@ func TestNodeUpdatesInHorizonEarlyTermination(t *testing.T) { ) // Collect only up to stopAt nodes, breaking afterwards. - var collected []models.Node + var collected []*models.Node count := 0 for node := range iter { if count >= stopAt { @@ -3782,11 +3772,11 @@ func TestAddChannelEdgeShellNodes(t *testing.T) { // a shell node present. node1, err := graph.FetchNode(ctx, node1.PubKeyBytes) require.NoError(t, err, "unable to fetch node1") - require.True(t, node1.HaveNodeAnnouncement) + require.True(t, node1.HaveAnnouncement()) node2, err = graph.FetchNode(ctx, node2.PubKeyBytes) require.NoError(t, err, "unable to fetch node2") - require.False(t, node2.HaveNodeAnnouncement) + require.False(t, node2.HaveAnnouncement()) // Show that attempting to add the channel again will result in an // error. @@ -3828,7 +3818,7 @@ func TestNodePruningUpdateIndexDeletion(t *testing.T) { t.Fatalf("should have 1 nodes instead have: %v", len(nodesInHorizon)) } - compareNodes(t, node1, &nodesInHorizon[0]) + compareNodes(t, node1, nodesInHorizon[0]) // We'll now delete the node from the graph, this should result in it // being removed from the update index as well. diff --git a/graph/db/interfaces.go b/graph/db/interfaces.go index 25eb6f5b1f3..2d4da91f540 100644 --- a/graph/db/interfaces.go +++ b/graph/db/interfaces.go @@ -111,7 +111,7 @@ type V1Store interface { //nolint:interfacebloat // by two nodes to quickly determine if they have the same set of up to // date node announcements. NodeUpdatesInHorizon(startTime, endTime time.Time, - opts ...IteratorOption) iter.Seq2[models.Node, error] + opts ...IteratorOption) iter.Seq2[*models.Node, error] // FetchNode attempts to look up a target node by its identity // public key. If the node isn't found in the database, then diff --git a/graph/db/kv_store.go b/graph/db/kv_store.go index cc9a14889d3..c8e6151b7bf 100644 --- a/graph/db/kv_store.go +++ b/graph/db/kv_store.go @@ -7,6 +7,7 @@ import ( "encoding/binary" "errors" "fmt" + "image/color" "io" "iter" "math" @@ -468,7 +469,7 @@ func forEachChannel(db kvdb.Backend, cb func(*models.ChannelEdgeInfo, chanID: chanID, }] - return cb(&info, policy1, policy2) + return cb(info, policy1, policy2) }, ) }, reset) @@ -556,7 +557,7 @@ func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo, } return cb( - models.NewCachedEdge(&info), + models.NewCachedEdge(info), cachedPolicy1, cachedPolicy2, ) }, @@ -848,7 +849,7 @@ func forEachNode(db kvdb.Backend, // Execute the callback, the transaction will abort if // this returns an error. 
- return cb(tx, &node) + return cb(tx, node) }) } @@ -945,12 +946,7 @@ func sourceNodeWithTx(nodes kvdb.RBucket) (*models.Node, error) { // With the pubKey of the source node retrieved, we're able to // fetch the full node information. - node, err := fetchLightningNode(nodes, selfPub) - if err != nil { - return nil, err - } - - return &node, nil + return fetchLightningNode(nodes, selfPub) } // SetSourceNode sets the source node within the graph database. The source @@ -1205,11 +1201,9 @@ func (c *KVStore) addChannelEdge(tx kvdb.RwTx, _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:]) switch { case errors.Is(node1Err, ErrGraphNodeNotFound): - node1Shell := models.Node{ - PubKeyBytes: edge.NodeKey1Bytes, - HaveNodeAnnouncement: false, - } - err := addLightningNode(tx, &node1Shell) + err := addLightningNode( + tx, models.NewV1ShellNode(edge.NodeKey1Bytes), + ) if err != nil { return fmt.Errorf("unable to create shell node "+ "for: %x: %w", edge.NodeKey1Bytes, err) @@ -1221,11 +1215,9 @@ func (c *KVStore) addChannelEdge(tx kvdb.RwTx, _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:]) switch { case errors.Is(node2Err, ErrGraphNodeNotFound): - node2Shell := models.Node{ - PubKeyBytes: edge.NodeKey2Bytes, - HaveNodeAnnouncement: false, - } - err := addLightningNode(tx, &node2Shell) + err := addLightningNode( + tx, models.NewV1ShellNode(edge.NodeKey2Bytes), + ) if err != nil { return fmt.Errorf("unable to create shell node "+ "for: %x: %w", edge.NodeKey2Bytes, err) @@ -1401,7 +1393,7 @@ func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID, edge.AuthProof = proof - return putChanEdgeInfo(edgeIndex, &edge, chanKey) + return putChanEdgeInfo(edgeIndex, edge, chanKey) }, func() {}) } @@ -2246,11 +2238,11 @@ func (c *KVStore) fetchNextChanUpdateBatch( // Now we have all the information we need to build the // channel edge. channel := ChannelEdge{ - Info: &edgeInfo, + Info: edgeInfo, Policy1: edge1, Policy2: edge2, - Node1: &node1, - Node2: &node2, + Node1: node1, + Node2: node2, } state.edgesSeen[chanIDInt] = struct{}{} @@ -2396,10 +2388,10 @@ func newNodeUpdatesIterator(batchSize int, startTime, endTime time.Time, // fetchNextNodeBatch fetches the next batch of node announcements using the // iterator state. func (c *KVStore) fetchNextNodeBatch( - state *nodeUpdatesIterator) ([]models.Node, bool, error) { + state *nodeUpdatesIterator) ([]*models.Node, bool, error) { var ( - nodeBatch []models.Node + nodeBatch []*models.Node hasMore bool ) @@ -2538,14 +2530,14 @@ func (c *KVStore) fetchNextNodeBatch( // update timestamp within the passed range. func (c *KVStore) NodeUpdatesInHorizon(startTime, endTime time.Time, - opts ...IteratorOption) iter.Seq2[models.Node, error] { + opts ...IteratorOption) iter.Seq2[*models.Node, error] { cfg := defaultIteratorConfig() for _, opt := range opts { opt(cfg) } - return func(yield func(models.Node, error) bool) { + return func(yield func(*models.Node, error) bool) { // Initialize iterator state. 
state := newNodeUpdatesIterator( cfg.nodeUpdateIterBatchSize, @@ -2559,7 +2551,7 @@ func (c *KVStore) NodeUpdatesInHorizon(startTime, log.Errorf("unable to read node updates in "+ "horizon: %v", err) - yield(models.Node{}, err) + yield(&models.Node{}, err) return } @@ -2792,7 +2784,7 @@ func (c *KVStore) FilterChannelRange(startHeight, continue } - node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo) + node1Key, node2Key := computeEdgePolicyKeys(edgeInfo) rawPolicy := edges.Get(node1Key) if len(rawPolicy) != 0 { @@ -2944,11 +2936,11 @@ func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) ( } chanEdges = append(chanEdges, ChannelEdge{ - Info: &edgeInfo, + Info: edgeInfo, Policy1: edge1, Policy2: edge2, - Node1: &node1, - Node2: &node2, + Node1: node1, + Node2: node2, }) } @@ -3095,7 +3087,7 @@ func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex, // being removed due to the channel becoming a zombie. We do this to // ensure we don't store unnecessary data for spent channels. if !isZombie { - return &edgeInfo, nil + return edgeInfo, nil } nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes @@ -3114,7 +3106,7 @@ func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex, ) } - return &edgeInfo, markEdgeZombie( + return edgeInfo, markEdgeZombie( zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2, ) } @@ -3413,7 +3405,7 @@ func (c *KVStore) fetchLightningNode(tx kvdb.RTx, return err } - node = &n + node = n return nil } @@ -3559,7 +3551,7 @@ func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend, } // Finally, we execute the callback. - err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy) + err = cb(tx, edgeInfo, outgoingPolicy, incomingPolicy) if err != nil { return err } @@ -3696,7 +3688,7 @@ func (c *KVStore) fetchOtherNode(tx kvdb.RTx, return err } - targetNode = &node + targetNode = node return nil } @@ -3789,7 +3781,7 @@ func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) ( if err != nil { return fmt.Errorf("%w: chanID=%x", err, chanID) } - edgeInfo = &edge + edgeInfo = edge // Once we have the information about the channels' parameters, // we'll fetch the routing policies for each for the directed @@ -3895,7 +3887,7 @@ func (c *KVStore) FetchChannelEdgesByID(chanID uint64) ( return err } - edgeInfo = &edge + edgeInfo = edge // Then we'll attempt to fetch the accompanying policies of this // edge. @@ -4369,7 +4361,7 @@ func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket, // If we got a node announcement for this node, we will have the rest // of the data available. If not we don't have more data to write. - if !node.HaveNodeAnnouncement { + if !node.HaveAnnouncement() { // Write HaveNodeAnnouncement=0. 
byteOrder.PutUint16(scratch[:2], 0) if _, err := b.Write(scratch[:2]); err != nil { @@ -4385,17 +4377,20 @@ func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket, return err } - if err := binary.Write(&b, byteOrder, node.Color.R); err != nil { + nodeColor := node.Color.UnwrapOr(color.RGBA{}) + + if err := binary.Write(&b, byteOrder, nodeColor.R); err != nil { return err } - if err := binary.Write(&b, byteOrder, node.Color.G); err != nil { + if err := binary.Write(&b, byteOrder, nodeColor.G); err != nil { return err } - if err := binary.Write(&b, byteOrder, node.Color.B); err != nil { + if err := binary.Write(&b, byteOrder, nodeColor.B); err != nil { return err } - if err := wire.WriteVarString(&b, 0, node.Alias); err != nil { + err = wire.WriteVarString(&b, 0, node.Alias.UnwrapOr("")) + if err != nil { return err } @@ -4434,7 +4429,8 @@ func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket, return err } - if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil { + err = aliasBucket.Put(nodePub, []byte(node.Alias.UnwrapOr(""))) + if err != nil { return err } @@ -4468,11 +4464,11 @@ func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket, } func fetchLightningNode(nodeBucket kvdb.RBucket, - nodePub []byte) (models.Node, error) { + nodePub []byte) (*models.Node, error) { nodeBytes := nodeBucket.Get(nodePub) if nodeBytes == nil { - return models.Node{}, ErrGraphNodeNotFound + return nil, ErrGraphNodeNotFound } nodeReader := bytes.NewReader(nodeBytes) @@ -4535,69 +4531,65 @@ func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex, return pubKey, features, nil } -func deserializeLightningNode(r io.Reader) (models.Node, error) { +func deserializeLightningNode(r io.Reader) (*models.Node, error) { var ( - node models.Node scratch [8]byte err error + pubKey [33]byte ) - // Always populate a feature vector, even if we don't have a node - // announcement and short circuit below. - node.Features = lnwire.EmptyFeatureVector() - if _, err := r.Read(scratch[:]); err != nil { - return models.Node{}, err + return nil, err } unix := int64(byteOrder.Uint64(scratch[:])) - node.LastUpdate = time.Unix(unix, 0) + lastUpdate := time.Unix(unix, 0) - if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil { - return models.Node{}, err + if _, err := io.ReadFull(r, pubKey[:]); err != nil { + return nil, err } + node := models.NewV1ShellNode(pubKey) + node.LastUpdate = lastUpdate + if _, err := r.Read(scratch[:2]); err != nil { - return models.Node{}, err + return nil, err } hasNodeAnn := byteOrder.Uint16(scratch[:2]) - if hasNodeAnn == 1 { - node.HaveNodeAnnouncement = true - } else { - node.HaveNodeAnnouncement = false - } - // The rest of the data is optional, and will only be there if we got a // node announcement for this node. - if !node.HaveNodeAnnouncement { + if hasNodeAnn == 0 { return node, nil } // We did get a node announcement for this node, so we'll have the rest // of the data available. 
- if err := binary.Read(r, byteOrder, &node.Color.R); err != nil { - return models.Node{}, err + var nodeColor color.RGBA + if err := binary.Read(r, byteOrder, &nodeColor.R); err != nil { + return nil, err } - if err := binary.Read(r, byteOrder, &node.Color.G); err != nil { - return models.Node{}, err + if err := binary.Read(r, byteOrder, &nodeColor.G); err != nil { + return nil, err } - if err := binary.Read(r, byteOrder, &node.Color.B); err != nil { - return models.Node{}, err + if err := binary.Read(r, byteOrder, &nodeColor.B); err != nil { + return nil, err } + node.Color = fn.Some(nodeColor) - node.Alias, err = wire.ReadVarString(r, 0) + alias, err := wire.ReadVarString(r, 0) if err != nil { - return models.Node{}, err + return nil, err } + node.Alias = fn.Some(alias) err = node.Features.Decode(r) if err != nil { - return models.Node{}, err + return nil, err } if _, err := r.Read(scratch[:2]); err != nil { - return models.Node{}, err + return nil, err } numAddresses := int(byteOrder.Uint16(scratch[:2])) @@ -4605,7 +4597,7 @@ func deserializeLightningNode(r io.Reader) (models.Node, error) { for i := 0; i < numAddresses; i++ { address, err := DeserializeAddr(r) if err != nil { - return models.Node{}, err + return nil, err } addresses = append(addresses, address) } @@ -4613,7 +4605,7 @@ func deserializeLightningNode(r io.Reader) (models.Node, error) { node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") if err != nil { - return models.Node{}, err + return nil, err } // We'll try and see if there are any opaque bytes left, if not, then @@ -4625,7 +4617,7 @@ func deserializeLightningNode(r io.Reader) (models.Node, error) { case errors.Is(err, io.ErrUnexpectedEOF): case errors.Is(err, io.EOF): case err != nil: - return models.Node{}, err + return nil, err } if len(extraBytes) > 0 { @@ -4710,11 +4702,11 @@ func putChanEdgeInfo(edgeIndex kvdb.RwBucket, } func fetchChanEdgeInfo(edgeIndex kvdb.RBucket, - chanID []byte) (models.ChannelEdgeInfo, error) { + chanID []byte) (*models.ChannelEdgeInfo, error) { edgeInfoBytes := edgeIndex.Get(chanID) if edgeInfoBytes == nil { - return models.ChannelEdgeInfo{}, ErrEdgeNotFound + return nil, ErrEdgeNotFound } edgeInfoReader := bytes.NewReader(edgeInfoBytes) @@ -4722,34 +4714,34 @@ func fetchChanEdgeInfo(edgeIndex kvdb.RBucket, return deserializeChanEdgeInfo(edgeInfoReader) } -func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) { +func deserializeChanEdgeInfo(r io.Reader) (*models.ChannelEdgeInfo, error) { var ( err error edgeInfo models.ChannelEdgeInfo ) if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } featureBytes, err := wire.ReadVarBytes(r, 0, 900, "features") if err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } features := lnwire.NewRawFeatureVector() err = features.Decode(bytes.NewReader(featureBytes)) if err != nil { - return models.ChannelEdgeInfo{}, fmt.Errorf("unable to decode "+ + return nil, fmt.Errorf("unable to decode "+ "features: %w", err) } edgeInfo.Features = lnwire.NewFeatureVector(features, lnwire.Features) @@ -4758,19 
+4750,19 @@ func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) { proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") if err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") if err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") if err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") if err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } if !proof.IsEmpty() { @@ -4779,17 +4771,17 @@ func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) { edgeInfo.ChannelPoint = wire.OutPoint{} if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil { - return models.ChannelEdgeInfo{}, err + return nil, err } // We'll try and see if there are any opaque bytes left, if not, then @@ -4801,10 +4793,10 @@ func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) { case errors.Is(err, io.ErrUnexpectedEOF): case errors.Is(err, io.EOF): case err != nil: - return models.ChannelEdgeInfo{}, err + return nil, err } - return edgeInfo, nil + return &edgeInfo, nil } func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy, diff --git a/graph/db/models/channel_auth_proof.go b/graph/db/models/channel_auth_proof.go index 13413946748..daf120b10d2 100644 --- a/graph/db/models/channel_auth_proof.go +++ b/graph/db/models/channel_auth_proof.go @@ -1,7 +1,5 @@ package models -import "github.com/btcsuite/btcd/btcec/v2/ecdsa" - // ChannelAuthProof is the authentication proof (the signature portion) for a // channel. Using the four signatures contained in the struct, and some // auxiliary knowledge (the funding script, node identities, and outpoint) nodes @@ -10,117 +8,23 @@ import "github.com/btcsuite/btcd/btcec/v2/ecdsa" // nodeID1 || nodeID2 || bitcoinKey1|| bitcoinKey2 || 2-byte-feature-len || // features. type ChannelAuthProof struct { - // nodeSig1 is a cached instance of the first node signature. - nodeSig1 *ecdsa.Signature - // NodeSig1Bytes are the raw bytes of the first node signature encoded // in DER format. NodeSig1Bytes []byte - // nodeSig2 is a cached instance of the second node signature. - nodeSig2 *ecdsa.Signature - // NodeSig2Bytes are the raw bytes of the second node signature // encoded in DER format. NodeSig2Bytes []byte - // bitcoinSig1 is a cached instance of the first bitcoin signature. - bitcoinSig1 *ecdsa.Signature - // BitcoinSig1Bytes are the raw bytes of the first bitcoin signature // encoded in DER format. BitcoinSig1Bytes []byte - // bitcoinSig2 is a cached instance of the second bitcoin signature. - bitcoinSig2 *ecdsa.Signature - // BitcoinSig2Bytes are the raw bytes of the second bitcoin signature // encoded in DER format. BitcoinSig2Bytes []byte } -// Node1Sig is the signature using the identity key of the node that is first -// in a lexicographical ordering of the serialized public keys of the two nodes -// that created the channel. 
-// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. -func (c *ChannelAuthProof) Node1Sig() (*ecdsa.Signature, error) { - if c.nodeSig1 != nil { - return c.nodeSig1, nil - } - - sig, err := ecdsa.ParseSignature(c.NodeSig1Bytes) - if err != nil { - return nil, err - } - - c.nodeSig1 = sig - - return sig, nil -} - -// Node2Sig is the signature using the identity key of the node that is second -// in a lexicographical ordering of the serialized public keys of the two nodes -// that created the channel. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. -func (c *ChannelAuthProof) Node2Sig() (*ecdsa.Signature, error) { - if c.nodeSig2 != nil { - return c.nodeSig2, nil - } - - sig, err := ecdsa.ParseSignature(c.NodeSig2Bytes) - if err != nil { - return nil, err - } - - c.nodeSig2 = sig - - return sig, nil -} - -// BitcoinSig1 is the signature using the public key of the first node that was -// used in the channel's multi-sig output. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. -func (c *ChannelAuthProof) BitcoinSig1() (*ecdsa.Signature, error) { - if c.bitcoinSig1 != nil { - return c.bitcoinSig1, nil - } - - sig, err := ecdsa.ParseSignature(c.BitcoinSig1Bytes) - if err != nil { - return nil, err - } - - c.bitcoinSig1 = sig - - return sig, nil -} - -// BitcoinSig2 is the signature using the public key of the second node that -// was used in the channel's multi-sig output. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. -func (c *ChannelAuthProof) BitcoinSig2() (*ecdsa.Signature, error) { - if c.bitcoinSig2 != nil { - return c.bitcoinSig2, nil - } - - sig, err := ecdsa.ParseSignature(c.BitcoinSig2Bytes) - if err != nil { - return nil, err - } - - c.bitcoinSig2 = sig - - return sig, nil -} - // IsEmpty check is the authentication proof is empty Proof is empty if at // least one of the signatures are equal to nil. func (c *ChannelAuthProof) IsEmpty() bool { diff --git a/graph/db/models/channel_edge_info.go b/graph/db/models/channel_edge_info.go index d19287571ae..b86c140bc1d 100644 --- a/graph/db/models/channel_edge_info.go +++ b/graph/db/models/channel_edge_info.go @@ -26,9 +26,6 @@ type ChannelEdgeInfo struct { // ChainHash is the hash that uniquely identifies the chain that this // channel was opened within. - // - // TODO(roasbeef): need to modify db keying for multi-chain - // * must add chain hash to prefix as well ChainHash chainhash.Hash // NodeKey1Bytes is the raw public key of the first node. @@ -41,11 +38,9 @@ type ChannelEdgeInfo struct { // BitcoinKey1Bytes is the raw public key of the first node. BitcoinKey1Bytes [33]byte - bitcoinKey1 *btcec.PublicKey // BitcoinKey2Bytes is the raw public key of the first node. BitcoinKey2Bytes [33]byte - bitcoinKey2 *btcec.PublicKey // Features is the list of protocol features supported by this channel // edge. @@ -79,24 +74,6 @@ type ChannelEdgeInfo struct { ExtraOpaqueData []byte } -// AddNodeKeys is a setter-like method that can be used to replace the set of -// keys for the target ChannelEdgeInfo. 
-func (c *ChannelEdgeInfo) AddNodeKeys(nodeKey1, nodeKey2, bitcoinKey1, - bitcoinKey2 *btcec.PublicKey) { - - c.nodeKey1 = nodeKey1 - copy(c.NodeKey1Bytes[:], c.nodeKey1.SerializeCompressed()) - - c.nodeKey2 = nodeKey2 - copy(c.NodeKey2Bytes[:], nodeKey2.SerializeCompressed()) - - c.bitcoinKey1 = bitcoinKey1 - copy(c.BitcoinKey1Bytes[:], c.bitcoinKey1.SerializeCompressed()) - - c.bitcoinKey2 = bitcoinKey2 - copy(c.BitcoinKey2Bytes[:], bitcoinKey2.SerializeCompressed()) -} - // NodeKey1 is the identity public key of the "first" node that was involved in // the creation of this channel. A node is considered "first" if the // lexicographical ordering the its serialized public key is "smaller" than @@ -139,46 +116,6 @@ func (c *ChannelEdgeInfo) NodeKey2() (*btcec.PublicKey, error) { return key, nil } -// BitcoinKey1 is the Bitcoin multi-sig key belonging to the first node, that -// was involved in the funding transaction that originally created the channel -// that this struct represents. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the pubkey if absolutely necessary. -func (c *ChannelEdgeInfo) BitcoinKey1() (*btcec.PublicKey, error) { - if c.bitcoinKey1 != nil { - return c.bitcoinKey1, nil - } - - key, err := btcec.ParsePubKey(c.BitcoinKey1Bytes[:]) - if err != nil { - return nil, err - } - c.bitcoinKey1 = key - - return key, nil -} - -// BitcoinKey2 is the Bitcoin multi-sig key belonging to the second node, that -// was involved in the funding transaction that originally created the channel -// that this struct represents. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the pubkey if absolutely necessary. -func (c *ChannelEdgeInfo) BitcoinKey2() (*btcec.PublicKey, error) { - if c.bitcoinKey2 != nil { - return c.bitcoinKey2, nil - } - - key, err := btcec.ParsePubKey(c.BitcoinKey2Bytes[:]) - if err != nil { - return nil, err - } - c.bitcoinKey2 = key - - return key, nil -} - // OtherNodeKeyBytes returns the node key bytes of the other end of the channel. func (c *ChannelEdgeInfo) OtherNodeKeyBytes(thisNodeKey []byte) ( [33]byte, error) { diff --git a/graph/db/models/node.go b/graph/db/models/node.go index 23d6a426890..84e6bf9bcce 100644 --- a/graph/db/models/node.go +++ b/graph/db/models/node.go @@ -7,8 +7,9 @@ import ( "time" "github.com/btcsuite/btcd/btcec/v2" - "github.com/btcsuite/btcd/btcec/v2/ecdsa" + "github.com/lightningnetwork/lnd/fn/v2" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" ) // Node represents an individual vertex/node within the channel graph. @@ -16,15 +17,13 @@ import ( // from it. As the graph is directed, a node will also have an incoming edge // attached to it for each outgoing edge. type Node struct { + // Version is the gossip version that this node was advertised on. + Version lnwire.GossipVersion + // PubKeyBytes is the raw bytes of the public key of the target node. PubKeyBytes [33]byte pubKey *btcec.PublicKey - // HaveNodeAnnouncement indicates whether we received a node - // announcement for this particular node. If true, the remaining fields - // will be set, if false only the PubKey is known for this node. - HaveNodeAnnouncement bool - // LastUpdate is the last time the vertex information for this node has // been updated. LastUpdate time.Time @@ -33,11 +32,11 @@ type Node struct { Addresses []net.Addr // Color is the selected color for the node. 
- Color color.RGBA + Color fn.Option[color.RGBA] // Alias is a nick-name for the node. The alias can be used to confirm // a node's identity or to serve as a short ID for an address book. - Alias string + Alias fn.Option[string] // AuthSigBytes is the raw signature under the advertised public key // which serves to authenticate the attributes announced by this node. @@ -53,11 +52,79 @@ type Node struct { // and ensure we're able to make upgrades to the network in a forwards // compatible manner. ExtraOpaqueData []byte +} + +// NodeV1Fields houses the fields that are specific to a version 1 node +// announcement. +type NodeV1Fields struct { + // Addresses is the list of addresses this node is reachable over. + Addresses []net.Addr + + // AuthSigBytes is the raw signature under the advertised public key + // which serves to authenticate the attributes announced by this node. + AuthSigBytes []byte + + // Features is the list of protocol features supported by this node. + Features *lnwire.RawFeatureVector + + // Color is the selected color for the node. + Color color.RGBA - // TODO(roasbeef): discovery will need storage to keep it's last IP - // address and re-announce if interface changes? + // Alias is a nick-name for the node. The alias can be used to confirm + // a node's identity or to serve as a short ID for an address book. + Alias string - // TODO(roasbeef): add update method and fetch? + // LastUpdate is the last time the vertex information for this node has + // been updated. + LastUpdate time.Time + + // ExtraOpaqueData is the set of data that was appended to this + // message, some of which we may not actually know how to iterate or + // parse. By holding onto this data, we ensure that we're able to + // properly validate the set of signatures that cover these new fields, + // and ensure we're able to make upgrades to the network in a forwards + // compatible manner. + ExtraOpaqueData []byte +} + +// NewV1Node creates a new version 1 node from the passed fields. +func NewV1Node(pub route.Vertex, n *NodeV1Fields) *Node { + return &Node{ + Version: lnwire.GossipVersion1, + PubKeyBytes: pub, + Addresses: n.Addresses, + AuthSigBytes: n.AuthSigBytes, + Features: lnwire.NewFeatureVector( + n.Features, lnwire.Features, + ), + Color: fn.Some(n.Color), + Alias: fn.Some(n.Alias), + LastUpdate: n.LastUpdate, + ExtraOpaqueData: n.ExtraOpaqueData, + } +} + +// NewV1ShellNode creates a new shell version 1 node. +func NewV1ShellNode(pubKey route.Vertex) *Node { + return NewShellNode(lnwire.GossipVersion1, pubKey) +} + +// NewShellNode creates a new shell node with the given gossip version and +// public key. +func NewShellNode(v lnwire.GossipVersion, pubKey route.Vertex) *Node { + return &Node{ + Version: v, + PubKeyBytes: pubKey, + Features: lnwire.NewFeatureVector(nil, lnwire.Features), + LastUpdate: time.Unix(0, 0), + } +} + +// HaveAnnouncement returns true if we have received a node announcement for +// this node. We determine this by checking whether we have a signature for +// the announcement. +func (n *Node) HaveAnnouncement() bool { + return len(n.AuthSigBytes) > 0 } // PubKey is the node's long-term identity public key. This key will be used to @@ -65,64 +132,48 @@ type Node struct { // // NOTE: By having this method to access an attribute, we ensure we only need // to fully deserialize the pubkey if absolutely necessary.
-func (l *Node) PubKey() (*btcec.PublicKey, error) { - if l.pubKey != nil { - return l.pubKey, nil +func (n *Node) PubKey() (*btcec.PublicKey, error) { + if n.pubKey != nil { + return n.pubKey, nil } - key, err := btcec.ParsePubKey(l.PubKeyBytes[:]) + key, err := btcec.ParsePubKey(n.PubKeyBytes[:]) if err != nil { return nil, err } - l.pubKey = key + n.pubKey = key return key, nil } -// AuthSig is a signature under the advertised public key which serves to -// authenticate the attributes announced by this node. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. -func (l *Node) AuthSig() (*ecdsa.Signature, error) { - return ecdsa.ParseSignature(l.AuthSigBytes) -} - -// AddPubKey is a setter-link method that can be used to swap out the public -// key for a node. -func (l *Node) AddPubKey(key *btcec.PublicKey) { - l.pubKey = key - copy(l.PubKeyBytes[:], key.SerializeCompressed()) -} - // NodeAnnouncement retrieves the latest node announcement of the node. -func (l *Node) NodeAnnouncement(signed bool) (*lnwire.NodeAnnouncement1, +func (n *Node) NodeAnnouncement(signed bool) (*lnwire.NodeAnnouncement1, error) { - if !l.HaveNodeAnnouncement { + if !n.HaveAnnouncement() { return nil, fmt.Errorf("node does not have node announcement") } - alias, err := lnwire.NewNodeAlias(l.Alias) + alias, err := lnwire.NewNodeAlias(n.Alias.UnwrapOr("")) if err != nil { return nil, err } nodeAnn := &lnwire.NodeAnnouncement1{ - Features: l.Features.RawFeatureVector, - NodeID: l.PubKeyBytes, - RGBColor: l.Color, + Features: n.Features.RawFeatureVector, + NodeID: n.PubKeyBytes, + RGBColor: n.Color.UnwrapOr(color.RGBA{}), Alias: alias, - Addresses: l.Addresses, - Timestamp: uint32(l.LastUpdate.Unix()), - ExtraOpaqueData: l.ExtraOpaqueData, + Addresses: n.Addresses, + Timestamp: uint32(n.LastUpdate.Unix()), + ExtraOpaqueData: n.ExtraOpaqueData, } if !signed { return nodeAnn, nil } - sig, err := lnwire.NewSigFromECDSARawSignature(l.AuthSigBytes) + sig, err := lnwire.NewSigFromECDSARawSignature(n.AuthSigBytes) if err != nil { return nil, err } @@ -138,15 +189,16 @@ func NodeFromWireAnnouncement(msg *lnwire.NodeAnnouncement1) *Node { timestamp := time.Unix(int64(msg.Timestamp), 0) features := lnwire.NewFeatureVector(msg.Features, lnwire.Features) - return &Node{ - HaveNodeAnnouncement: true, - LastUpdate: timestamp, - Addresses: msg.Addresses, - PubKeyBytes: msg.NodeID, - Alias: msg.Alias.String(), - AuthSigBytes: msg.Signature.ToSignatureBytes(), - Features: features, - Color: msg.RGBColor, - ExtraOpaqueData: msg.ExtraOpaqueData, - } + return NewV1Node( + msg.NodeID, + &NodeV1Fields{ + LastUpdate: timestamp, + Addresses: msg.Addresses, + Alias: msg.Alias.String(), + AuthSigBytes: msg.Signature.ToSignatureBytes(), + Features: features.RawFeatureVector, + Color: msg.RGBColor, + ExtraOpaqueData: msg.ExtraOpaqueData, + }, + ) } diff --git a/graph/db/notifications.go b/graph/db/notifications.go index eecc38c27d4..54a748c66ec 100644 --- a/graph/db/notifications.go +++ b/graph/db/notifications.go @@ -391,9 +391,11 @@ func (c *ChannelGraph) addToTopologyChange(update *TopologyChange, nodeUpdate := &NetworkNodeUpdate{ Addresses: m.Addresses, IdentityKey: pubKey, - Alias: m.Alias, - Color: EncodeHexColor(m.Color), - Features: m.Features.Clone(), + Alias: m.Alias.UnwrapOr(""), + Color: EncodeHexColor( + m.Color.UnwrapOr(color.RGBA{}), + ), + Features: m.Features.Clone(), } update.NodeUpdates = append(update.NodeUpdates, nodeUpdate) diff 
--git a/graph/db/sql_migration.go b/graph/db/sql_migration.go index e737718006d..5b5f6bf40a0 100644 --- a/graph/db/sql_migration.go +++ b/graph/db/sql_migration.go @@ -7,6 +7,7 @@ import ( "database/sql" "errors" "fmt" + "image/color" "net" "slices" "time" @@ -1449,10 +1450,12 @@ func insertNodeSQLMig(ctx context.Context, db SQLQueries, PubKey: node.PubKeyBytes[:], } - if node.HaveNodeAnnouncement { + if node.HaveAnnouncement() { params.LastUpdate = sqldb.SQLInt64(node.LastUpdate.Unix()) - params.Color = sqldb.SQLStrValid(EncodeHexColor(node.Color)) - params.Alias = sqldb.SQLStrValid(node.Alias) + params.Color = sqldb.SQLStrValid( + EncodeHexColor(node.Color.UnwrapOr(color.RGBA{})), + ) + params.Alias = sqldb.SQLStrValid(node.Alias.UnwrapOr("")) params.Signature = node.AuthSigBytes } @@ -1463,7 +1466,7 @@ func insertNodeSQLMig(ctx context.Context, db SQLQueries, } // We can exit here if we don't have the announcement yet. - if !node.HaveNodeAnnouncement { + if !node.HaveAnnouncement() { return nodeID, nil } diff --git a/graph/db/sql_migration_test.go b/graph/db/sql_migration_test.go index 66a19843842..9819cf7f910 100644 --- a/graph/db/sql_migration_test.go +++ b/graph/db/sql_migration_test.go @@ -384,10 +384,7 @@ func TestMigrateGraphToSQL(t *testing.T) { // The PruneGraph call requires that the source // node be set. So that is the first object // we will write. - &models.Node{ - HaveNodeAnnouncement: false, - PubKeyBytes: testPub, - }, + models.NewV1ShellNode(testPub), // Now we add some block heights to prune // the graph at. uint32(1), uint32(2), uint32(20), uint32(3), @@ -747,17 +744,15 @@ type testNodeOpt func(*models.Node) // makeTestNode can be used to create a test models.Node. The // functional options can be used to modify the node's attributes. func makeTestNode(t *testing.T, opts ...testNodeOpt) *models.Node { - n := &models.Node{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSigBytes, - LastUpdate: testTime, - Color: testColor, - Alias: "kek", - Features: testFeatures, - Addresses: testAddrs, - ExtraOpaqueData: testExtraData, - PubKeyBytes: genPubKey(t), - } + n := models.NewV1Node(genPubKey(t), &models.NodeV1Fields{ + AuthSigBytes: testSigBytes, + LastUpdate: testTime, + Color: testColor, + Alias: "kek", + Features: testFeatures.RawFeatureVector, + Addresses: testAddrs, + ExtraOpaqueData: testExtraData, + }) for _, opt := range opts { opt(n) @@ -776,12 +771,7 @@ func makeTestNode(t *testing.T, opts ...testNodeOpt) *models.Node { func makeTestShellNode(t *testing.T, opts ...testNodeOpt) *models.Node { - n := &models.Node{ - HaveNodeAnnouncement: false, - PubKeyBytes: genPubKey(t), - Features: testEmptyFeatures, - LastUpdate: time.Unix(0, 0), - } + n := models.NewV1ShellNode(genPubKey(t)) for _, opt := range opts { opt(n) @@ -1816,19 +1806,15 @@ func genRandomNode(t *rapid.T) *models.Node { extraOpaqueData = nil } - node := &models.Node{ - HaveNodeAnnouncement: true, - AuthSigBytes: sigBytes, - LastUpdate: randTime, - Color: randColor, - Alias: alias.String(), - Features: lnwire.NewFeatureVector( - features, lnwire.Features, - ), + node := models.NewV1Node(pubKeyBytes, &models.NodeV1Fields{ + AuthSigBytes: sigBytes, + LastUpdate: randTime, + Color: randColor, + Alias: alias.String(), + Features: features, Addresses: addrs, ExtraOpaqueData: extraOpaqueData, - PubKeyBytes: pubKeyBytes, - } + }) // We call this method so that the internal pubkey field is populated // which then lets us to proper struct comparison later on. 
diff --git a/graph/db/sql_store.go b/graph/db/sql_store.go index c9283df88d8..9e052d60db5 100644 --- a/graph/db/sql_store.go +++ b/graph/db/sql_store.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "errors" "fmt" + color "image/color" "iter" "maps" "math" @@ -555,14 +556,14 @@ func (s *SQLStore) SetSourceNode(ctx context.Context, // // NOTE: This is part of the V1Store interface. func (s *SQLStore) NodeUpdatesInHorizon(startTime, endTime time.Time, - opts ...IteratorOption) iter.Seq2[models.Node, error] { + opts ...IteratorOption) iter.Seq2[*models.Node, error] { cfg := defaultIteratorConfig() for _, opt := range opts { opt(cfg) } - return func(yield func(models.Node, error) bool) { + return func(yield func(*models.Node, error) bool) { var ( ctx = context.TODO() lastUpdateTime sql.NullInt64 @@ -573,7 +574,7 @@ func (s *SQLStore) NodeUpdatesInHorizon(startTime, endTime time.Time, // Each iteration, we'll read a batch amount of nodes, yield // them, then decide is we have more or not. for hasMore { - var batch []models.Node + var batch []*models.Node //nolint:ll err := s.db.ExecTx(ctx, sqldb.ReadTxOpt(), func(db SQLQueries) error { @@ -607,7 +608,7 @@ func (s *SQLStore) NodeUpdatesInHorizon(startTime, endTime time.Time, err = forEachNodeInBatch( ctx, s.cfg.QueryCfg, db, rows, func(_ int64, node *models.Node) error { - batch = append(batch, *node) + batch = append(batch, node) // Update pagination cursors // based on the last processed @@ -629,14 +630,14 @@ func (s *SQLStore) NodeUpdatesInHorizon(startTime, endTime time.Time, return nil }, func() { - batch = []models.Node{} + clear(batch) }) if err != nil { log.Errorf("NodeUpdatesInHorizon batch "+ "error: %v", err) - yield(models.Node{}, err) + yield(&models.Node{}, err) return } @@ -3485,28 +3486,30 @@ func buildNodeWithBatchData(dbNode sqlc.GraphNode, var pub [33]byte copy(pub[:], dbNode.PubKey) - node := &models.Node{ - PubKeyBytes: pub, - Features: lnwire.EmptyFeatureVector(), - LastUpdate: time.Unix(0, 0), - } + node := models.NewV1ShellNode(pub) if len(dbNode.Signature) == 0 { return node, nil } - node.HaveNodeAnnouncement = true node.AuthSigBytes = dbNode.Signature - node.Alias = dbNode.Alias.String - node.LastUpdate = time.Unix(dbNode.LastUpdate.Int64, 0) + + if dbNode.Alias.Valid { + node.Alias = fn.Some(dbNode.Alias.String) + } + if dbNode.LastUpdate.Valid { + node.LastUpdate = time.Unix(dbNode.LastUpdate.Int64, 0) + } var err error if dbNode.Color.Valid { - node.Color, err = DecodeHexColor(dbNode.Color.String) + nodeColor, err := DecodeHexColor(dbNode.Color.String) if err != nil { return nil, fmt.Errorf("unable to decode color: %w", err) } + + node.Color = fn.Some(nodeColor) } // Use preloaded features. 
@@ -3608,10 +3611,27 @@ func upsertNode(ctx context.Context, db SQLQueries, PubKey: node.PubKeyBytes[:], } - if node.HaveNodeAnnouncement { - params.LastUpdate = sqldb.SQLInt64(node.LastUpdate.Unix()) - params.Color = sqldb.SQLStrValid(EncodeHexColor(node.Color)) - params.Alias = sqldb.SQLStrValid(node.Alias) + if node.HaveAnnouncement() { + switch node.Version { + case lnwire.GossipVersion1: + params.LastUpdate = sqldb.SQLInt64( + node.LastUpdate.Unix(), + ) + + case lnwire.GossipVersion2: + + default: + return 0, fmt.Errorf("unknown gossip version: %d", + node.Version) + } + + node.Color.WhenSome(func(rgba color.RGBA) { + params.Color = sqldb.SQLStrValid(EncodeHexColor(rgba)) + }) + node.Alias.WhenSome(func(s string) { + params.Alias = sqldb.SQLStrValid(s) + }) + params.Signature = node.AuthSigBytes } @@ -3622,7 +3642,7 @@ func upsertNode(ctx context.Context, db SQLQueries, } // We can exit here if we don't have the announcement yet. - if !node.HaveNodeAnnouncement { + if !node.HaveAnnouncement() { return nodeID, nil } diff --git a/graph/interfaces.go b/graph/interfaces.go index 0896a0850a9..75f47558a3c 100644 --- a/graph/interfaces.go +++ b/graph/interfaces.go @@ -2,13 +2,9 @@ package graph import ( "context" - "iter" "time" - "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/batch" - graphdb "github.com/lightningnetwork/lnd/graph/db" "github.com/lightningnetwork/lnd/graph/db/models" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing/route" @@ -96,183 +92,3 @@ type ChannelGraphSource interface { // currently marked as a zombie edge. IsZombieEdge(chanID lnwire.ShortChannelID) (bool, error) } - -// DB is an interface describing a persisted Lightning Network graph. -// -//nolint:interfacebloat -type DB interface { - // PruneTip returns the block height and hash of the latest block that - // has been used to prune channels in the graph. Knowing the "prune tip" - // allows callers to tell if the graph is currently in sync with the - // current best known UTXO state. - PruneTip() (*chainhash.Hash, uint32, error) - - // PruneGraph prunes newly closed channels from the channel graph in - // response to a new block being solved on the network. Any transactions - // which spend the funding output of any known channels within the graph - // will be deleted. Additionally, the "prune tip", or the last block - // which has been used to prune the graph is stored so callers can - // ensure the graph is fully in sync with the current UTXO state. A - // slice of channels that have been closed by the target block are - // returned if the function succeeds without error. - PruneGraph(spentOutputs []*wire.OutPoint, blockHash *chainhash.Hash, - blockHeight uint32) ([]*models.ChannelEdgeInfo, error) - - // ChannelView returns the verifiable edge information for each active - // channel within the known channel graph. The set of UTXO's (along with - // their scripts) returned are the ones that need to be watched on - // chain to detect channel closes on the resident blockchain. - ChannelView() ([]graphdb.EdgePoint, error) - - // PruneGraphNodes is a garbage collection method which attempts to - // prune out any nodes from the channel graph that are currently - // unconnected. This ensure that we only maintain a graph of reachable - // nodes. In the event that a pruned node gains more channels, it will - // be re-added back to the graph. - PruneGraphNodes() error - - // SourceNode returns the source node of the graph. 
The source node is - // treated as the center node within a star-graph. This method may be - // used to kick off a path finding algorithm in order to explore the - // reachability of another node based off the source node. - SourceNode(ctx context.Context) (*models.Node, error) - - // DisabledChannelIDs returns the channel ids of disabled channels. - // A channel is disabled when two of the associated ChanelEdgePolicies - // have their disabled bit on. - DisabledChannelIDs() ([]uint64, error) - - // FetchChanInfos returns the set of channel edges that correspond to - // the passed channel ID's. If an edge is the query is unknown to the - // database, it will skipped and the result will contain only those - // edges that exist at the time of the query. This can be used to - // respond to peer queries that are seeking to fill in gaps in their - // view of the channel graph. - FetchChanInfos(chanIDs []uint64) ([]graphdb.ChannelEdge, error) - - // ChanUpdatesInHorizon returns all the known channel edges which have - // at least one edge that has an update timestamp within the specified - // horizon. - ChanUpdatesInHorizon(startTime, endTime time.Time, - opts ...graphdb.IteratorOption, - ) iter.Seq2[graphdb.ChannelEdge, error] - - // DeleteChannelEdges removes edges with the given channel IDs from the - // database and marks them as zombies. This ensures that we're unable to - // re-add it to our database once again. If an edge does not exist - // within the database, then ErrEdgeNotFound will be returned. If - // strictZombiePruning is true, then when we mark these edges as - // zombies, we'll set up the keys such that we require the node that - // failed to send the fresh update to be the one that resurrects the - // channel from its zombie state. The markZombie bool denotes whether - // to mark the channel as a zombie. - DeleteChannelEdges(strictZombiePruning, markZombie bool, - chanIDs ...uint64) error - - // DisconnectBlockAtHeight is used to indicate that the block specified - // by the passed height has been disconnected from the main chain. This - // will "rewind" the graph back to the height below, deleting channels - // that are no longer confirmed from the graph. The prune log will be - // set to the last prune height valid for the remaining chain. - // Channels that were removed from the graph resulting from the - // disconnected block are returned. - DisconnectBlockAtHeight(height uint32) ([]*models.ChannelEdgeInfo, - error) - - // HasChannelEdge returns true if the database knows of a channel edge - // with the passed channel ID, and false otherwise. If an edge with that - // ID is found within the graph, then two time stamps representing the - // last time the edge was updated for both directed edges are returned - // along with the boolean. If it is not found, then the zombie index is - // checked and its result is returned as the second boolean. - HasChannelEdge(chanID uint64) (time.Time, time.Time, bool, bool, error) - - // FetchChannelEdgesByID attempts to lookup the two directed edges for - // the channel identified by the channel ID. If the channel can't be - // found, then ErrEdgeNotFound is returned. A struct which houses the - // general information for the channel itself is returned as well as - // two structs that contain the routing policies for the channel in - // either direction. - // - // ErrZombieEdge an be returned if the edge is currently marked as a - // zombie within the database. 
In this case, the ChannelEdgePolicy's - // will be nil, and the ChannelEdgeInfo will only include the public - // keys of each node. - FetchChannelEdgesByID(chanID uint64) (*models.ChannelEdgeInfo, - *models.ChannelEdgePolicy, *models.ChannelEdgePolicy, error) - - // AddNode adds a vertex/node to the graph database. If the - // node is not in the database from before, this will add a new, - // unconnected one to the graph. If it is present from before, this will - // update that node's information. Note that this method is expected to - // only be called to update an already present node from a node - // announcement, or to insert a node found in a channel update. - AddNode(ctx context.Context, node *models.Node, - op ...batch.SchedulerOption) error - - // AddChannelEdge adds a new (undirected, blank) edge to the graph - // database. An undirected edge from the two target nodes are created. - // The information stored denotes the static attributes of the channel, - // such as the channelID, the keys involved in creation of the channel, - // and the set of features that the channel supports. The chanPoint and - // chanID are used to uniquely identify the edge globally within the - // database. - AddChannelEdge(ctx context.Context, edge *models.ChannelEdgeInfo, - op ...batch.SchedulerOption) error - - // MarkEdgeZombie attempts to mark a channel identified by its channel - // ID as a zombie. This method is used on an ad-hoc basis, when channels - // need to be marked as zombies outside the normal pruning cycle. - MarkEdgeZombie(chanID uint64, pubKey1, pubKey2 [33]byte) error - - // UpdateEdgePolicy updates the edge routing policy for a single - // directed edge within the database for the referenced channel. The - // `flags` attribute within the ChannelEdgePolicy determines which of - // the directed edges are being updated. If the flag is 1, then the - // first node's information is being updated, otherwise it's the second - // node's information. The node ordering is determined by the - // lexicographical ordering of the identity public keys of the nodes on - // either side of the channel. - UpdateEdgePolicy(ctx context.Context, edge *models.ChannelEdgePolicy, - op ...batch.SchedulerOption) error - - // HasNode determines if the graph has a vertex identified by - // the target node identity public key. If the node exists in the - // database, a timestamp of when the data for the node was lasted - // updated is returned along with a true boolean. Otherwise, an empty - // time.Time is returned with a false boolean. - HasNode(ctx context.Context, nodePub [33]byte) (time.Time, bool, error) - - // FetchNode attempts to look up a target node by its identity - // public key. If the node isn't found in the database, then - // ErrGraphNodeNotFound is returned. - FetchNode(ctx context.Context, nodePub route.Vertex) (*models.Node, - error) - - // ForEachNodeChannel iterates through all channels of the given node, - // executing the passed callback with an edge info structure and the - // policies of each end of the channel. The first edge policy is the - // outgoing edge *to* the connecting node, while the second is the - // incoming edge *from* the connecting node. If the callback returns an - // error, then the iteration is halted with the error propagated back up - // to the caller. - // - // Unknown policies are passed into the callback as nil values. 
- ForEachNodeChannel(ctx context.Context, nodePub route.Vertex, - cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy, - *models.ChannelEdgePolicy) error, reset func()) error - - // AddEdgeProof sets the proof of an existing edge in the graph - // database. - AddEdgeProof(chanID lnwire.ShortChannelID, - proof *models.ChannelAuthProof) error - - // IsPublicNode is a helper method that determines whether the node with - // the given public key is seen as a public node in the graph from the - // graph's source node's point of view. - IsPublicNode(pubKey [33]byte) (bool, error) - - // MarkEdgeLive clears an edge from our zombie index, deeming it as - // live. - MarkEdgeLive(chanID uint64) error -} diff --git a/graph/notifications_test.go b/graph/notifications_test.go index 32408424a1a..50dd89eac13 100644 --- a/graph/notifications_test.go +++ b/graph/notifications_test.go @@ -83,16 +83,16 @@ func createTestNode(t *testing.T) *models.Node { require.NoError(t, err) pub := priv.PubKey().SerializeCompressed() - n := &models.Node{ - HaveNodeAnnouncement: true, - LastUpdate: time.Unix(updateTime, 0), - Addresses: testAddrs, - Color: color.RGBA{1, 2, 3, 0}, - Alias: "kek" + hex.EncodeToString(pub), - AuthSigBytes: testSig.Serialize(), - Features: testFeatures, - } - copy(n.PubKeyBytes[:], pub) + var pubKey [33]byte + copy(pubKey[:], pub) + n := models.NewV1Node(pubKey, &models.NodeV1Fields{ + LastUpdate: time.Unix(updateTime, 0), + Addresses: testAddrs, + Color: color.RGBA{1, 2, 3, 0}, + Alias: "kek" + hex.EncodeToString(pub), + AuthSigBytes: testSig.Serialize(), + Features: testFeatures.RawFeatureVector, + }) return n } @@ -700,15 +700,12 @@ func TestNodeUpdateNotification(t *testing.T) { t, testFeaturesBuf.Bytes(), featuresBuf.Bytes(), ) - if nodeUpdate.Alias != ann.Alias { - t.Fatalf("node alias doesn't match: expected %v, got %v", - ann.Alias, nodeUpdate.Alias) - } - if nodeUpdate.Color != graphdb.EncodeHexColor(ann.Color) { - t.Fatalf("node color doesn't match: expected %v, "+ - "got %v", graphdb.EncodeHexColor(ann.Color), - nodeUpdate.Color) - } + require.Equal(t, nodeUpdate.Alias, ann.Alias.UnwrapOr("")) + require.Equal( + t, nodeUpdate.Color, graphdb.EncodeHexColor( + ann.Color.UnwrapOr(color.RGBA{}), + ), + ) } // Create lookup map for notifications we are intending to receive. Entries diff --git a/lnrpc/devrpc/dev_server.go b/lnrpc/devrpc/dev_server.go index 6cc4347f067..ae4e6817be1 100644 --- a/lnrpc/devrpc/dev_server.go +++ b/lnrpc/devrpc/dev_server.go @@ -226,15 +226,7 @@ func (s *Server) ImportGraph(ctx context.Context, var err error for _, rpcNode := range graph.Nodes { - node := &models.Node{ - HaveNodeAnnouncement: true, - LastUpdate: time.Unix( - int64(rpcNode.LastUpdate), 0, - ), - Alias: rpcNode.Alias, - } - - node.PubKeyBytes, err = parsePubKey(rpcNode.PubKey) + pubKeyBytes, err := parsePubKey(rpcNode.PubKey) if err != nil { return nil, err } @@ -251,15 +243,21 @@ func (s *Server) ImportGraph(ctx context.Context, } featureVector := lnwire.NewRawFeatureVector(featureBits...) 
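A side note on the Features change in this hunk: NodeV1Fields now stores the raw feature bits instead of a decorated *lnwire.FeatureVector. A minimal sketch, assuming only the lnwire calls already used in this diff, of how a reader re-wraps the raw vector when it needs named feature lookups (featureVector is the variable built just above):

    fv := lnwire.NewFeatureVector(featureVector, lnwire.Features)
    if fv.HasFeature(lnwire.DataLossProtectOptional) {
        // The imported node advertises option_data_loss_protect.
    }

Keeping only the raw bits in the persisted model means the feature-name map is reattached where it is actually needed.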
- node.Features = lnwire.NewFeatureVector( - featureVector, featureNames, - ) - node.Color, err = lncfg.ParseHexColor(rpcNode.Color) + nodeColor, err := lncfg.ParseHexColor(rpcNode.Color) if err != nil { return nil, err } + node := models.NewV1Node(pubKeyBytes, &models.NodeV1Fields{ + LastUpdate: time.Unix( + int64(rpcNode.LastUpdate), 0, + ), + Alias: rpcNode.Alias, + Features: featureVector, + Color: nodeColor, + }) + if err := graphDB.AddNode(ctx, node); err != nil { return nil, fmt.Errorf("unable to add node %v: %w", rpcNode.PubKey, err) diff --git a/routing/pathfind_test.go b/routing/pathfind_test.go index 77bad02e3d7..8e85716c447 100644 --- a/routing/pathfind_test.go +++ b/routing/pathfind_test.go @@ -228,15 +228,15 @@ func parseTestGraph(t *testing.T, useCache bool, path string) ( return nil, err } - dbNode := &models.Node{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSig.Serialize(), - LastUpdate: testTime, - Addresses: testAddrs, - Alias: node.Alias, - Features: testFeatures, - } - copy(dbNode.PubKeyBytes[:], pubBytes) + var pubKey [33]byte + copy(pubKey[:], pubBytes) + dbNode := models.NewV1Node(pubKey, &models.NodeV1Fields{ + AuthSigBytes: testSig.Serialize(), + LastUpdate: testTime, + Addresses: testAddrs, + Alias: node.Alias, + Features: testFeatures.RawFeatureVector, + }) // We require all aliases within the graph to be unique for our // tests. @@ -565,14 +565,15 @@ func createTestGraphFromChannels(t *testing.T, useCache bool, features = lnwire.EmptyFeatureVector() } - dbNode := &models.Node{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSig.Serialize(), - LastUpdate: testTime, - Addresses: testAddrs, - Alias: alias, - Features: features, - } + var pubKeyBytes [33]byte + copy(pubKeyBytes[:], pubKey.SerializeCompressed()) + dbNode := models.NewV1Node(pubKeyBytes, &models.NodeV1Fields{ + AuthSigBytes: testSig.Serialize(), + LastUpdate: testTime, + Addresses: testAddrs, + Alias: alias, + Features: features.RawFeatureVector, + }) copy(dbNode.PubKeyBytes[:], pubKey.SerializeCompressed()) @@ -1250,13 +1251,12 @@ func runPathFindingWithAdditionalEdges(t *testing.T, useCache bool) { dogePubKeyHex := "03dd46ff29a6941b4a2607525b043ec9b020b3f318a1bf281536fd7011ec59c882" dogePubKeyBytes, err := hex.DecodeString(dogePubKeyHex) require.NoError(t, err, "unable to decode public key") - dogePubKey, err := btcec.ParsePubKey(dogePubKeyBytes) - require.NoError(t, err, "unable to parse public key from bytes") - doge := &models.Node{} - doge.AddPubKey(dogePubKey) - doge.Alias = "doge" - copy(doge.PubKeyBytes[:], dogePubKeyBytes) + var pubKey [33]byte + copy(pubKey[:], dogePubKeyBytes) + doge := models.NewV1Node(pubKey, &models.NodeV1Fields{ + Alias: "doge", + }) graph.aliasMap["doge"] = doge.PubKeyBytes // Create the channel edge going from songoku to doge and include it in diff --git a/routing/payment_session_source.go b/routing/payment_session_source.go index bc1088d7b9f..15820059d1b 100644 --- a/routing/payment_session_source.go +++ b/routing/payment_session_source.go @@ -1,7 +1,6 @@ package routing import ( - "github.com/btcsuite/btcd/btcec/v2" "github.com/lightningnetwork/lnd/fn/v2" "github.com/lightningnetwork/lnd/graph/db/models" "github.com/lightningnetwork/lnd/htlcswitch" @@ -102,17 +101,13 @@ func RouteHintsToEdges(routeHints [][]zpay32.HopHint, target route.Vertex) ( // we'll need to look at the next hint's start node. If // we've reached the end of the hints list, we can // assume we've reached the destination. 
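The hunk below replaces the throwaway models.Node that used to stand in for the hop hint's end node with a plain route.Vertex. An equivalent hedged formulation, assuming route.NewVertex from lnd's routing/route package, could read:

    endNode := target
    if i != len(routeHint)-1 {
        // The next hint's start node is this hop's end node.
        endNode = route.NewVertex(routeHint[i+1].NodeID)
    }

Either way, the 33-byte vertex is all the cached edge policy needs for its ToNodePubKey closure.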
- endNode := &models.Node{} + endNode := target if i != len(routeHint)-1 { - endNode.AddPubKey(routeHint[i+1].NodeID) - } else { - targetPubKey, err := btcec.ParsePubKey( - target[:], + nodeID := routeHint[i+1].NodeID + copy( + endNode[:], + nodeID.SerializeCompressed(), ) - if err != nil { - return nil, err - } - endNode.AddPubKey(targetPubKey) } // Finally, create the channel edge from the hop hint @@ -120,7 +115,7 @@ func RouteHintsToEdges(routeHints [][]zpay32.HopHint, target route.Vertex) ( // at the start of the channel. edgePolicy := &models.CachedEdgePolicy{ ToNodePubKey: func() route.Vertex { - return endNode.PubKeyBytes + return endNode }, ToNodeFeatures: lnwire.EmptyFeatureVector(), ChannelID: hopHint.ChannelID, diff --git a/routing/payment_session_test.go b/routing/payment_session_test.go index 12d8608a491..547fe0e2d3c 100644 --- a/routing/payment_session_test.go +++ b/routing/payment_session_test.go @@ -89,8 +89,9 @@ func TestUpdateAdditionalEdge(t *testing.T) { // Create a minimal test node using the private key priv1. pub := priv1.PubKey().SerializeCompressed() - testNode := &models.Node{} - copy(testNode.PubKeyBytes[:], pub) + var pubKey [33]byte + copy(pubKey[:], pub) + testNode := models.NewV1ShellNode(pubKey) nodeID, err := testNode.PubKey() require.NoError(t, err, "failed to get node id") diff --git a/routing/router_test.go b/routing/router_test.go index b811793d258..3daef10690f 100644 --- a/routing/router_test.go +++ b/routing/router_test.go @@ -23,7 +23,6 @@ import ( sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/fn/v2" - "github.com/lightningnetwork/lnd/graph" graphdb "github.com/lightningnetwork/lnd/graph/db" "github.com/lightningnetwork/lnd/graph/db/models" "github.com/lightningnetwork/lnd/htlcswitch" @@ -192,16 +191,16 @@ func createTestNode() (*models.Node, error) { } pub := priv.PubKey().SerializeCompressed() - n := &models.Node{ - HaveNodeAnnouncement: true, - LastUpdate: time.Unix(updateTime, 0), - Addresses: testAddrs, - Color: color.RGBA{1, 2, 3, 0}, - Alias: "kek" + string(pub), - AuthSigBytes: testSig.Serialize(), - Features: testFeatures, - } - copy(n.PubKeyBytes[:], pub) + var pubKey [33]byte + copy(pubKey[:], pub) + n := models.NewV1Node(pubKey, &models.NodeV1Fields{ + LastUpdate: time.Unix(updateTime, 0), + Addresses: testAddrs, + Color: color.RGBA{1, 2, 3, 0}, + Alias: "kek" + string(pub), + AuthSigBytes: testSig.Serialize(), + Features: testFeatures.RawFeatureVector, + }) return n, nil } @@ -2871,29 +2870,29 @@ func TestAddEdgeUnknownVertexes(t *testing.T) { // Now check that we can update the node info for the partial node // without messing up the channel graph. 
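Before the node-info updates that follow, a hedged sketch of the two constructors this change leans on: NewV1ShellNode builds a node known only by its public key, while NewV1Node attaches the announced V1 fields; the HaveAnnouncement accessor presumably tells the two apart:

    var pubKey [33]byte
    copy(pubKey[:], priv1.PubKey().SerializeCompressed())

    // A shell node: only the identity key is known so far.
    shell := models.NewV1ShellNode(pubKey)

    // Presumably false until a node announcement fills in the V1 fields.
    if !shell.HaveAnnouncement() {
        // Treat it as a partial node discovered via a channel update.
    }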
- n1 := &models.Node{ - HaveNodeAnnouncement: true, - LastUpdate: time.Unix(123, 0), - Addresses: testAddrs, - Color: color.RGBA{1, 2, 3, 0}, - Alias: "node11", - AuthSigBytes: testSig.Serialize(), - Features: testFeatures, - } - copy(n1.PubKeyBytes[:], priv1.PubKey().SerializeCompressed()) + var pubKeyBytes1 [33]byte + copy(pubKeyBytes1[:], priv1.PubKey().SerializeCompressed()) + n1 := models.NewV1Node(pubKeyBytes1, &models.NodeV1Fields{ + LastUpdate: time.Unix(123, 0), + Addresses: testAddrs, + Color: color.RGBA{1, 2, 3, 0}, + Alias: "node11", + AuthSigBytes: testSig.Serialize(), + Features: testFeatures.RawFeatureVector, + }) require.NoError(t, ctx.graph.AddNode(ctxb, n1)) - n2 := &models.Node{ - HaveNodeAnnouncement: true, - LastUpdate: time.Unix(123, 0), - Addresses: testAddrs, - Color: color.RGBA{1, 2, 3, 0}, - Alias: "node22", - AuthSigBytes: testSig.Serialize(), - Features: testFeatures, - } - copy(n2.PubKeyBytes[:], priv2.PubKey().SerializeCompressed()) + var pubKeyBytes2 [33]byte + copy(pubKeyBytes2[:], priv2.PubKey().SerializeCompressed()) + n2 := models.NewV1Node(pubKeyBytes2, &models.NodeV1Fields{ + LastUpdate: time.Unix(123, 0), + Addresses: testAddrs, + Color: color.RGBA{1, 2, 3, 0}, + Alias: "node22", + AuthSigBytes: testSig.Serialize(), + Features: testFeatures.RawFeatureVector, + }) require.NoError(t, ctx.graph.AddNode(ctxb, n2)) @@ -2941,7 +2940,7 @@ type mockGraphBuilder struct { updateEdge func(update *models.ChannelEdgePolicy) error } -func newMockGraphBuilder(graph graph.DB) *mockGraphBuilder { +func newMockGraphBuilder(graph *graphdb.ChannelGraph) *mockGraphBuilder { return &mockGraphBuilder{ updateEdge: func(update *models.ChannelEdgePolicy) error { return graph.UpdateEdgePolicy( diff --git a/rpcserver.go b/rpcserver.go index d3d3c518014..32282455dcb 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "errors" "fmt" + "image/color" "io" "maps" "math" @@ -7178,11 +7179,13 @@ func marshalNode(node *models.Node) *lnrpc.LightningNode { customRecords := marshalExtraOpaqueData(node.ExtraOpaqueData) return &lnrpc.LightningNode{ - LastUpdate: uint32(node.LastUpdate.Unix()), - PubKey: hex.EncodeToString(node.PubKeyBytes[:]), - Addresses: nodeAddrs, - Alias: node.Alias, - Color: graphdb.EncodeHexColor(node.Color), + LastUpdate: uint32(node.LastUpdate.Unix()), + PubKey: hex.EncodeToString(node.PubKeyBytes[:]), + Addresses: nodeAddrs, + Alias: node.Alias.UnwrapOr(""), + Color: graphdb.EncodeHexColor( + node.Color.UnwrapOr(color.RGBA{}), + ), Features: features, CustomRecords: customRecords, } @@ -8249,9 +8252,9 @@ func (r *rpcServer) ForwardingHistory(ctx context.Context, } // Cache the peer alias. - chanToPeerAlias[chanID] = peer.Alias + chanToPeerAlias[chanID] = peer.Alias.UnwrapOr("") - return peer.Alias, nil + return peer.Alias.UnwrapOr(""), nil } // TODO(roasbeef): add settlement latency? diff --git a/server.go b/server.go index a2d36eb8653..2ad809d5e79 100644 --- a/server.go +++ b/server.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "errors" "fmt" + "image/color" "math/big" prand "math/rand" "net" @@ -3317,18 +3318,19 @@ func (s *server) createNewHiddenService(ctx context.Context) error { // Finally, we'll update the on-disk version of our announcement so it // will eventually propagate to nodes in the network. 
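One more hedged aside before the announcement handling below: on disk the alias is now an optional plain string, while the wire announcement carries an lnwire.NodeAlias, so a disk-to-wire conversion would look roughly as follows (node here is a hypothetical *models.Node loaded from the graph):

    aliasStr := node.Alias.UnwrapOr("")
    nodeAlias, err := lnwire.NewNodeAlias(aliasStr)
    if err != nil {
        return fmt.Errorf("invalid alias %q: %w", aliasStr, err)
    }
    _ = nodeAlias // placed into the outgoing lnwire.NodeAnnouncement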
- selfNode := &models.Node{ - HaveNodeAnnouncement: true, - LastUpdate: time.Unix(int64(newNodeAnn.Timestamp), 0), - Addresses: newNodeAnn.Addresses, - Alias: newNodeAnn.Alias.String(), - Features: lnwire.NewFeatureVector( - newNodeAnn.Features, lnwire.Features, - ), - Color: newNodeAnn.RGBColor, - AuthSigBytes: newNodeAnn.Signature.ToSignatureBytes(), - } - copy(selfNode.PubKeyBytes[:], s.identityECDH.PubKey().SerializeCompressed()) + var nodePub route.Vertex + copy(nodePub[:], s.identityECDH.PubKey().SerializeCompressed()) + selfNode := models.NewV1Node( + nodePub, &models.NodeV1Fields{ + Addresses: newNodeAnn.Addresses, + Features: newNodeAnn.Features, + AuthSigBytes: newNodeAnn.Signature.ToSignatureBytes(), + Color: newNodeAnn.RGBColor, + Alias: newNodeAnn.Alias.String(), + LastUpdate: time.Unix(int64(newNodeAnn.Timestamp), 0), + }, + ) + if err := s.graphDB.SetSourceNode(ctx, selfNode); err != nil { return fmt.Errorf("can't set self node: %w", err) } @@ -3442,12 +3444,11 @@ func (s *server) updateAndBroadcastSelfNode(ctx context.Context, return fmt.Errorf("unable to get current source node: %w", err) } - selfNode.HaveNodeAnnouncement = true selfNode.LastUpdate = time.Unix(int64(newNodeAnn.Timestamp), 0) selfNode.Addresses = newNodeAnn.Addresses - selfNode.Alias = newNodeAnn.Alias.String() + selfNode.Alias = fn.Some(newNodeAnn.Alias.String()) selfNode.Features = s.featureMgr.Get(feature.SetNodeAnn) - selfNode.Color = newNodeAnn.RGBColor + selfNode.Color = fn.Some(newNodeAnn.RGBColor) selfNode.AuthSigBytes = newNodeAnn.Signature.ToSignatureBytes() copy(selfNode.PubKeyBytes[:], s.identityECDH.PubKey().SerializeCompressed()) @@ -5564,7 +5565,7 @@ func (s *server) setSelfNode(ctx context.Context, nodePub route.Vertex, // Parse the color from config. We will update this later if the config // color is not changed from default (#3399FF) and we have a value in // the source node. - color, err := lncfg.ParseHexColor(s.cfg.Color) + nodeColor, err := lncfg.ParseHexColor(s.cfg.Color) if err != nil { return fmt.Errorf("unable to parse color: %w", err) } @@ -5588,13 +5589,17 @@ func (s *server) setSelfNode(ctx context.Context, nodePub route.Vertex, // didn't specify a different color in the config. We'll use the // source node's color. if s.cfg.Color == defaultColor { - color = srcNode.Color + srcNode.Color.WhenSome(func(rgba color.RGBA) { + nodeColor = rgba + }) } // If an alias is not specified in the config, we'll use the // source node's alias. if alias == "" { - alias = srcNode.Alias + srcNode.Alias.WhenSome(func(s string) { + alias = s + }) } // If the `externalip` is not specified in the config, it means @@ -5624,16 +5629,21 @@ func (s *server) setSelfNode(ctx context.Context, nodePub route.Vertex, // TODO(abdulkbk): potentially find a way to use the source node's // features in the self node. - selfNode := &models.Node{ - HaveNodeAnnouncement: true, - LastUpdate: nodeLastUpdate, - Addresses: addrs, - Alias: nodeAlias.String(), - Color: color, - Features: s.featureMgr.Get(feature.SetNodeAnn), - } - - copy(selfNode.PubKeyBytes[:], nodePub[:]) + selfNode := models.NewV1Node( + nodePub, + &models.NodeV1Fields{ + Alias: nodeAlias.String(), + Color: nodeColor, + LastUpdate: nodeLastUpdate, + Addresses: addrs, + Features: s.featureMgr.GetRaw(feature.SetNodeAnn), + // NOTE: just a workaround to pass the below call to + // NodeAnnouncement which would otherwise fail because + // of missing AuthSigBytes. The AutSigBytes is set + // properly when SigneAnnouncement is called below. 
+ AuthSigBytes: []byte{0}, + }, + ) // Based on the disk representation of the node announcement generated // above, we'll generate a node announcement that can go out on the
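The placeholder AuthSigBytes above exists so that regenerating a wire announcement from this disk node does not fail before the real signature has been produced. A rough sketch of that step, assuming the NodeAnnouncement helper referenced in the NOTE keeps its earlier channeldb-era shape (unsigned announcement first, real signature attached after signing):

    nodeAnn, err := selfNode.NodeAnnouncement(false)
    if err != nil {
        return fmt.Errorf("unable to build node announcement: %w", err)
    }
    // The placeholder is overwritten with the real AuthSigBytes once the
    // announcement has been signed.
    _ = nodeAnn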