Commit a06e1cc

node.ForEachTracked and node.ForEachTrackedWithNodeSink methods

1 parent a0b13f3 commit a06e1cc

1 file changed: +81 -0

hamt.go

Lines changed: 81 additions & 0 deletions
@@ -877,3 +877,84 @@ func (n *Node) ForEach(ctx context.Context, f func(k string, val *cbg.Deferred)
 	}
 	return nil
 }
+
+// ForEachTracked recursively calls function f on each k / val pair found in the HAMT.
+// This performs a full traversal of the graph and for large HAMTs can cause
+// a large number of loads from the underlying store.
+// The values are returned as raw bytes, not decoded.
+// This method also provides the trail of indices to the current node, which can be used to formulate a selector suffix
+func (n *Node) ForEachTracked(ctx context.Context, trail []int, f func(k string, val *cbg.Deferred, selectorSuffix []int) error) error {
+	idx := 0
+	for _, p := range n.Pointers {
+		// Seek the next set bit in the bitfield to find the actual index for this pointer
+		for n.Bitfield.Bit(idx) == 0 {
+			idx++
+		}
+		trail = append(trail, idx)
+		if p.isShard() {
+			chnd, err := p.loadChild(ctx, n.store, n.bitWidth, n.hash)
+			if err != nil {
+				return err
+			}
+
+			if err := chnd.ForEachTracked(ctx, trail, f); err != nil {
+				return err
+			}
+		} else {
+			for _, kv := range p.KVs {
+				if err := f(string(kv.Key), kv.Value, trail); err != nil {
+					return err
+				}
+			}
+		}
+		idx++
+	}
+	return nil
+}
+
+// ForEachTrackedWithNodeSink recursively calls function f on each k / val pair found in the HAMT.
+// This performs a full traversal of the graph and for large HAMTs can cause
+// a large number of loads from the underlying store.
+// The values are returned as raw bytes, not decoded.
+// This method also provides the trail of indices to the current node, which can be used to formulate a selector suffix
+// This method also provides a callback to sink the current node
+func (n *Node) ForEachTrackedWithNodeSink(ctx context.Context, trail []int, b *bytes.Buffer, sink cbg.CBORUnmarshaler, f func(k string, val *cbg.Deferred, selectorSuffix []int) error) error {
+	if sink != nil {
+		if b == nil {
+			b = bytes.NewBuffer(nil)
+		}
+		b.Reset()
+		if err := n.MarshalCBOR(b); err != nil {
+			return err
+		}
+		if err := sink.UnmarshalCBOR(b); err != nil {
+			return err
+		}
+	}
+	idx := 0
+	for _, p := range n.Pointers {
+		// Seek the next set bit in the bitfield to find the actual index for this pointer
+		for n.Bitfield.Bit(idx) == 0 {
+			idx++
+		}
+		trail = append(trail, idx)
+		if p.isShard() {
+			chnd, err := p.loadChild(ctx, n.store, n.bitWidth, n.hash)
+			if err != nil {
+				return err
+			}
+
+			if err := chnd.ForEachTrackedWithNodeSink(ctx, trail, b, sink, f); err != nil {
+				return err
+			}
+		} else {
+			for _, kv := range p.KVs {
+				if err := f(string(kv.Key), kv.Value, trail); err != nil {
+					return err
+				}
+			}
+		}
+		idx++
+	}
+	return nil
+}
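
A minimal calling sketch for the new ForEachTracked method, assuming this is the go-hamt-ipld package; the hamtwalk package name, the import paths, and the LoadNode/IpldStore plumbing below are assumptions for illustration, not part of this commit.

package hamtwalk

import (
	"context"
	"fmt"

	hamt "github.com/filecoin-project/go-hamt-ipld/v3" // assumed import path
	cid "github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	cbg "github.com/whyrusleeping/cbor-gen"
)

// walkTracked is a hypothetical helper: it loads a HAMT root and prints every
// key together with the trail of bitfield indices reported by ForEachTracked.
func walkTracked(ctx context.Context, cs cbor.IpldStore, root cid.Cid) error {
	n, err := hamt.LoadNode(ctx, cs, root)
	if err != nil {
		return err
	}
	// Start with a nil trail at the root; the callback receives the key, the
	// raw (undecoded) value bytes, and the trail recorded for that entry.
	return n.ForEachTracked(ctx, nil, func(k string, val *cbg.Deferred, suffix []int) error {
		fmt.Printf("key=%q rawLen=%d trail=%v\n", k, len(val.Raw), suffix)
		return nil
	})
}

Because the values arrive as *cbg.Deferred, the callback decides whether and how to decode them; the suffix argument is the slice of bitfield indices appended on the way down.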

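ForEachTrackedWithNodeSink behaves the same way but, when a sink is supplied, re-serializes each visited node into it before walking that node's pointers. A sketch of one possible sink, under the same import-path assumptions as above; nodeCollector and walkWithSink are hypothetical names.

package hamtwalk

import (
	"context"
	"io"

	hamt "github.com/filecoin-project/go-hamt-ipld/v3" // assumed import path
	cid "github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	cbg "github.com/whyrusleeping/cbor-gen"
)

// nodeCollector is a hypothetical sink: it satisfies cbg.CBORUnmarshaler by
// copying the CBOR encoding of every visited HAMT node into memory.
type nodeCollector struct {
	blocks [][]byte
}

func (nc *nodeCollector) UnmarshalCBOR(r io.Reader) error {
	buf, err := io.ReadAll(r)
	if err != nil {
		return err
	}
	nc.blocks = append(nc.blocks, buf)
	return nil
}

// walkWithSink traverses the HAMT, ignoring the entries themselves and keeping
// only the serialized nodes gathered by the sink.
func walkWithSink(ctx context.Context, cs cbor.IpldStore, root cid.Cid) ([][]byte, error) {
	n, err := hamt.LoadNode(ctx, cs, root)
	if err != nil {
		return nil, err
	}
	sink := &nodeCollector{}
	// The *bytes.Buffer argument may be nil; the method allocates a scratch
	// buffer itself when a sink is set.
	err = n.ForEachTrackedWithNodeSink(ctx, nil, nil, sink,
		func(k string, val *cbg.Deferred, suffix []int) error {
			return nil // entries handled the same way as in ForEachTracked
		})
	if err != nil {
		return nil, err
	}
	return sink.blocks, nil
}

Collecting the serialized nodes in memory is just one option; any cbg.CBORUnmarshaler can serve as the sink.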