diff --git a/Makefile b/Makefile index aff534c0..5a04af3d 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ LDFLAGS := -X github.com/Overclock-Validator/mithril/pkg/version.Version=$(VERSI -X github.com/Overclock-Validator/mithril/pkg/version.GitBranch=$(GIT_BRANCH) \ -X github.com/Overclock-Validator/mithril/pkg/version.BuildDate=$(BUILD_DATE) -.PHONY: build release clean server-setup disk-setup tune +.PHONY: build release clean server-setup disk-setup tune test-conformance-elf build: go build -ldflags "$(LDFLAGS)" -o mithril ./cmd/mithril @@ -28,3 +28,6 @@ disk-setup: tune: ./scripts/performance-tune.sh $(ARGS) + +test-conformance-elf: + go test ./conformance/ -run TestConformance_ElfLoader_Firedancer -v diff --git a/conformance/elf_loader_fb_test.go b/conformance/elf_loader_fb_test.go new file mode 100644 index 00000000..5544b330 --- /dev/null +++ b/conformance/elf_loader_fb_test.go @@ -0,0 +1,210 @@ +package conformance + +import ( + "encoding/binary" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "testing" + + "github.com/Overclock-Validator/mithril/conformance/sealevel" + "github.com/Overclock-Validator/mithril/pkg/features" + "github.com/Overclock-Validator/mithril/pkg/sbpf" + "github.com/Overclock-Validator/mithril/pkg/sbpf/loader" + sealevelPkg "github.com/Overclock-Validator/mithril/pkg/sealevel" +) + +func parseFBFeatures(fbFeatures *sealevel.FeatureSet) *features.Features { + f := features.NewFeaturesDefault() + if fbFeatures == nil { + return f + } + n := fbFeatures.FeaturesLength() + for i := 0; i < n; i++ { + ftr := fbFeatures.Features(i) + for _, featureGate := range features.AllFeatureGates { + featureIdInt := binary.LittleEndian.Uint64(featureGate.Address[:8]) + if featureIdInt == ftr { + f.EnableFeature(featureGate, 0) + } + } + } + return f +} + +func TestConformance_ElfLoader_Firedancer(t *testing.T) { + basePath := "test-vectors/elf_loader/fixtures" + + entries, err := os.ReadDir(basePath) + if err != nil { + t.Skipf("test-vectors not available: %v", err) + } + + var fixtures []string + for _, entry := range entries { + if strings.HasSuffix(entry.Name(), ".fix") { + fixtures = append(fixtures, filepath.Join(basePath, entry.Name())) + } + } + + if len(fixtures) == 0 { + t.Skip("no .fix fixtures found") + } + + t.Logf("Found %d ELF loader fixtures", len(fixtures)) + + var ( + total int + passPass int + failFail int + falsePass int + falseFail int + panics int + parseErrors int + entryMatch int + entryTotal int + textMatch int + textTotal int + ) + + var failures []string + var panicFixtures []string + + for _, fixturePath := range fixtures { + total++ + name := filepath.Base(fixturePath) + + data, err := os.ReadFile(fixturePath) + if err != nil { + t.Errorf("%s: read error: %v", name, err) + continue + } + + fixture := sealevel.GetRootAsELFLoaderFixture(data, 0) + if fixture == nil { + parseErrors++ + continue + } + + input := fixture.Input(nil) + if input == nil { + parseErrors++ + continue + } + + elfData := input.ElfDataBytes() + if elfData == nil { + parseErrors++ + continue + } + + output := fixture.Output(nil) + fixtureExpectsSuccess := output != nil && output.ErrCode() == 0 + + fbFeatures := input.Features(nil) + f := parseFBFeatures(fbFeatures) + + syscalls := sbpf.SyscallRegistry(func(hash uint32) (sbpf.Syscall, bool) { + return sealevelPkg.Syscalls(f, input.DeployChecks(), hash) + }) + + var program *sbpf.Program + var loadErr error + var didPanic bool + + func() { + defer func() { + if r := recover(); r != nil { + didPanic = true + panics++ + 
panicFixtures = append(panicFixtures, fmt.Sprintf("PANIC %s: %v", name, r)) + } + }() + + l, err := loader.NewLoaderWithSyscalls(elfData, syscalls, input.DeployChecks(), f) + if err != nil { + loadErr = err + return + } + program, loadErr = l.Load() + }() + + if didPanic { + continue + } + + if loadErr == nil && fixtureExpectsSuccess { + passPass++ + + if output != nil { + entryTotal++ + if program.Entrypoint == output.EntryPc() { + entryMatch++ + } else { + failures = append(failures, fmt.Sprintf("ENTRY_MISMATCH %s: got=%d want=%d", name, program.Entrypoint, output.EntryPc())) + } + + textTotal++ + if uint64(len(program.Text)) == output.TextCnt() { + textMatch++ + } else { + failures = append(failures, fmt.Sprintf("TEXT_CNT_MISMATCH %s: got=%d want=%d", name, len(program.Text), output.TextCnt())) + } + } + } else if loadErr != nil && !fixtureExpectsSuccess { + failFail++ + } else if loadErr == nil && !fixtureExpectsSuccess { + falsePass++ + failures = append(failures, fmt.Sprintf("FALSE_PASS %s: loaded OK but fixture expects failure", name)) + } else { + falseFail++ + failures = append(failures, fmt.Sprintf("FALSE_FAIL %s: %v (entry_pc=%d text_cnt=%d)", name, loadErr, output.EntryPc(), output.TextCnt())) + } + } + + sort.Strings(failures) + + t.Logf("\n=== ELF Loader Conformance Results ===") + t.Logf("Total fixtures: %d", total) + t.Logf("Parse errors: %d", parseErrors) + t.Logf("Both pass: %d", passPass) + t.Logf("Both fail: %d", failFail) + t.Logf("False pass (bad): %d (mithril loads, fixture rejects)", falsePass) + t.Logf("False fail (bad): %d (mithril rejects, fixture loads)", falseFail) + t.Logf("Panics (crash bug): %d", panics) + t.Logf("Entry PC match: %d / %d", entryMatch, entryTotal) + t.Logf("Text count match: %d / %d", textMatch, textTotal) + + if len(panicFixtures) > 0 { + t.Logf("\n=== PANICS (crash bugs - highest priority) ===") + for _, p := range panicFixtures { + t.Logf(" %s", p) + } + } + + if len(failures) > 0 { + t.Logf("\n=== First 50 failures ===") + limit := 50 + if len(failures) < limit { + limit = len(failures) + } + for _, f := range failures[:limit] { + t.Logf(" %s", f) + } + } + + // Report pass rate + agree := passPass + failFail + disagree := falsePass + falseFail + passRate := float64(agree) / float64(agree+disagree) * 100 + t.Logf("\nConformance rate: %.1f%% (%d/%d)", passRate, agree, agree+disagree) + + if panics > 0 { + t.Errorf("CRITICAL: %d fixtures caused panics in the loader", panics) + } + if disagree > 0 { + t.Logf("WARNING: %d disagreements found", disagree) + } +} diff --git a/conformance/sealevel/ELFLoaderCtx.go b/conformance/sealevel/ELFLoaderCtx.go new file mode 100644 index 00000000..41dfb054 --- /dev/null +++ b/conformance/sealevel/ELFLoaderCtx.go @@ -0,0 +1,120 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package sealevel + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type ELFLoaderCtx struct { + _tab flatbuffers.Table +} + +func GetRootAsELFLoaderCtx(buf []byte, offset flatbuffers.UOffsetT) *ELFLoaderCtx { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &ELFLoaderCtx{} + x.Init(buf, n+offset) + return x +} + +func FinishELFLoaderCtxBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsELFLoaderCtx(buf []byte, offset flatbuffers.UOffsetT) *ELFLoaderCtx { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &ELFLoaderCtx{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedELFLoaderCtxBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *ELFLoaderCtx) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *ELFLoaderCtx) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *ELFLoaderCtx) ElfData(j int) byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) + } + return 0 +} + +func (rcv *ELFLoaderCtx) ElfDataLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func (rcv *ELFLoaderCtx) ElfDataBytes() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *ELFLoaderCtx) MutateElfData(j int, n byte) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n) + } + return false +} + +func (rcv *ELFLoaderCtx) Features(obj *FeatureSet) *FeatureSet { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(FeatureSet) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *ELFLoaderCtx) DeployChecks() bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +func (rcv *ELFLoaderCtx) MutateDeployChecks(n bool) bool { + return rcv._tab.MutateBoolSlot(8, n) +} + +func ELFLoaderCtxStart(builder *flatbuffers.Builder) { + builder.StartObject(3) +} +func ELFLoaderCtxAddElfData(builder *flatbuffers.Builder, elfData flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(elfData), 0) +} +func ELFLoaderCtxStartElfDataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(1, numElems, 1) +} +func ELFLoaderCtxAddFeatures(builder *flatbuffers.Builder, features flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(features), 0) +} +func ELFLoaderCtxAddDeployChecks(builder *flatbuffers.Builder, deployChecks bool) { + builder.PrependBoolSlot(2, deployChecks, false) +} +func ELFLoaderCtxEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/conformance/sealevel/ELFLoaderEffects.go b/conformance/sealevel/ELFLoaderEffects.go new file mode 100644 index 00000000..ee1c0779 --- /dev/null +++ b/conformance/sealevel/ELFLoaderEffects.go @@ -0,0 +1,141 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package sealevel + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type ELFLoaderEffects struct { + _tab flatbuffers.Table +} + +func GetRootAsELFLoaderEffects(buf []byte, offset flatbuffers.UOffsetT) *ELFLoaderEffects { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &ELFLoaderEffects{} + x.Init(buf, n+offset) + return x +} + +func FinishELFLoaderEffectsBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsELFLoaderEffects(buf []byte, offset flatbuffers.UOffsetT) *ELFLoaderEffects { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &ELFLoaderEffects{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedELFLoaderEffectsBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *ELFLoaderEffects) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *ELFLoaderEffects) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *ELFLoaderEffects) ErrCode() byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetByte(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *ELFLoaderEffects) MutateErrCode(n byte) bool { + return rcv._tab.MutateByteSlot(4, n) +} + +func (rcv *ELFLoaderEffects) RodataHash(obj *XXHash) *XXHash { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(XXHash) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *ELFLoaderEffects) TextCnt() uint64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.GetUint64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *ELFLoaderEffects) MutateTextCnt(n uint64) bool { + return rcv._tab.MutateUint64Slot(8, n) +} + +func (rcv *ELFLoaderEffects) TextOff() uint64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.GetUint64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *ELFLoaderEffects) MutateTextOff(n uint64) bool { + return rcv._tab.MutateUint64Slot(10, n) +} + +func (rcv *ELFLoaderEffects) EntryPc() uint64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + return rcv._tab.GetUint64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *ELFLoaderEffects) MutateEntryPc(n uint64) bool { + return rcv._tab.MutateUint64Slot(12, n) +} + +func (rcv *ELFLoaderEffects) CalldestsHash(obj *XXHash) *XXHash { + o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(XXHash) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func ELFLoaderEffectsStart(builder *flatbuffers.Builder) { + builder.StartObject(6) +} +func ELFLoaderEffectsAddErrCode(builder *flatbuffers.Builder, errCode byte) { + builder.PrependByteSlot(0, errCode, 0) +} +func ELFLoaderEffectsAddRodataHash(builder *flatbuffers.Builder, rodataHash flatbuffers.UOffsetT) { + builder.PrependStructSlot(1, flatbuffers.UOffsetT(rodataHash), 0) +} +func ELFLoaderEffectsAddTextCnt(builder *flatbuffers.Builder, textCnt uint64) { + builder.PrependUint64Slot(2, textCnt, 0) +} +func ELFLoaderEffectsAddTextOff(builder *flatbuffers.Builder, textOff uint64) { + builder.PrependUint64Slot(3, textOff, 0) +} +func ELFLoaderEffectsAddEntryPc(builder *flatbuffers.Builder, entryPc uint64) { + builder.PrependUint64Slot(4, entryPc, 0) +} +func ELFLoaderEffectsAddCalldestsHash(builder 
*flatbuffers.Builder, calldestsHash flatbuffers.UOffsetT) { + builder.PrependStructSlot(5, flatbuffers.UOffsetT(calldestsHash), 0) +} +func ELFLoaderEffectsEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/conformance/sealevel/ELFLoaderFixture.go b/conformance/sealevel/ELFLoaderFixture.go new file mode 100644 index 00000000..097ad083 --- /dev/null +++ b/conformance/sealevel/ELFLoaderFixture.go @@ -0,0 +1,97 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package sealevel + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type ELFLoaderFixture struct { + _tab flatbuffers.Table +} + +func GetRootAsELFLoaderFixture(buf []byte, offset flatbuffers.UOffsetT) *ELFLoaderFixture { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &ELFLoaderFixture{} + x.Init(buf, n+offset) + return x +} + +func FinishELFLoaderFixtureBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsELFLoaderFixture(buf []byte, offset flatbuffers.UOffsetT) *ELFLoaderFixture { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &ELFLoaderFixture{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedELFLoaderFixtureBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *ELFLoaderFixture) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *ELFLoaderFixture) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *ELFLoaderFixture) Metadata(obj *FixtureMetadata) *FixtureMetadata { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(FixtureMetadata) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *ELFLoaderFixture) Input(obj *ELFLoaderCtx) *ELFLoaderCtx { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(ELFLoaderCtx) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *ELFLoaderFixture) Output(obj *ELFLoaderEffects) *ELFLoaderEffects { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(ELFLoaderEffects) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func ELFLoaderFixtureStart(builder *flatbuffers.Builder) { + builder.StartObject(3) +} +func ELFLoaderFixtureAddMetadata(builder *flatbuffers.Builder, metadata flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(metadata), 0) +} +func ELFLoaderFixtureAddInput(builder *flatbuffers.Builder, input flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(input), 0) +} +func ELFLoaderFixtureAddOutput(builder *flatbuffers.Builder, output flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(output), 0) +} +func ELFLoaderFixtureEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/conformance/sealevel/FeatureSet.go b/conformance/sealevel/FeatureSet.go new file mode 100644 index 00000000..7b9ec5a5 --- /dev/null +++ b/conformance/sealevel/FeatureSet.go @@ -0,0 +1,81 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package sealevel + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type FeatureSet struct { + _tab flatbuffers.Table +} + +func GetRootAsFeatureSet(buf []byte, offset flatbuffers.UOffsetT) *FeatureSet { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &FeatureSet{} + x.Init(buf, n+offset) + return x +} + +func FinishFeatureSetBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsFeatureSet(buf []byte, offset flatbuffers.UOffsetT) *FeatureSet { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &FeatureSet{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedFeatureSetBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *FeatureSet) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *FeatureSet) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *FeatureSet) Features(j int) uint64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) + } + return 0 +} + +func (rcv *FeatureSet) FeaturesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func (rcv *FeatureSet) MutateFeatures(j int, n uint64) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateUint64(a+flatbuffers.UOffsetT(j*8), n) + } + return false +} + +func FeatureSetStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func FeatureSetAddFeatures(builder *flatbuffers.Builder, features flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(features), 0) +} +func FeatureSetStartFeaturesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(8, numElems, 8) +} +func FeatureSetEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/conformance/sealevel/FixtureMetadata.go b/conformance/sealevel/FixtureMetadata.go new file mode 100644 index 00000000..71c5b5dc --- /dev/null +++ b/conformance/sealevel/FixtureMetadata.go @@ -0,0 +1,60 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package sealevel + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type FixtureMetadata struct { + _tab flatbuffers.Table +} + +func GetRootAsFixtureMetadata(buf []byte, offset flatbuffers.UOffsetT) *FixtureMetadata { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &FixtureMetadata{} + x.Init(buf, n+offset) + return x +} + +func FinishFixtureMetadataBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsFixtureMetadata(buf []byte, offset flatbuffers.UOffsetT) *FixtureMetadata { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &FixtureMetadata{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedFixtureMetadataBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *FixtureMetadata) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *FixtureMetadata) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *FixtureMetadata) FnEntrypoint() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func FixtureMetadataStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func FixtureMetadataAddFnEntrypoint(builder *flatbuffers.Builder, fnEntrypoint flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(fnEntrypoint), 0) +} +func FixtureMetadataEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/conformance/sealevel/XXHash.go b/conformance/sealevel/XXHash.go new file mode 100644 index 00000000..08ee2f49 --- /dev/null +++ b/conformance/sealevel/XXHash.go @@ -0,0 +1,89 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package sealevel + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type XXHash struct { + _tab flatbuffers.Struct +} + +func (rcv *XXHash) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *XXHash) Table() flatbuffers.Table { + return rcv._tab.Table +} + +func (rcv *XXHash) H0() byte { + return rcv._tab.GetByte(rcv._tab.Pos + flatbuffers.UOffsetT(0)) +} +func (rcv *XXHash) MutateH0(n byte) bool { + return rcv._tab.MutateByte(rcv._tab.Pos+flatbuffers.UOffsetT(0), n) +} + +func (rcv *XXHash) H1() byte { + return rcv._tab.GetByte(rcv._tab.Pos + flatbuffers.UOffsetT(1)) +} +func (rcv *XXHash) MutateH1(n byte) bool { + return rcv._tab.MutateByte(rcv._tab.Pos+flatbuffers.UOffsetT(1), n) +} + +func (rcv *XXHash) H2() byte { + return rcv._tab.GetByte(rcv._tab.Pos + flatbuffers.UOffsetT(2)) +} +func (rcv *XXHash) MutateH2(n byte) bool { + return rcv._tab.MutateByte(rcv._tab.Pos+flatbuffers.UOffsetT(2), n) +} + +func (rcv *XXHash) H3() byte { + return rcv._tab.GetByte(rcv._tab.Pos + flatbuffers.UOffsetT(3)) +} +func (rcv *XXHash) MutateH3(n byte) bool { + return rcv._tab.MutateByte(rcv._tab.Pos+flatbuffers.UOffsetT(3), n) +} + +func (rcv *XXHash) H4() byte { + return rcv._tab.GetByte(rcv._tab.Pos + flatbuffers.UOffsetT(4)) +} +func (rcv *XXHash) MutateH4(n byte) bool { + return rcv._tab.MutateByte(rcv._tab.Pos+flatbuffers.UOffsetT(4), n) +} + +func (rcv *XXHash) H5() byte { + return rcv._tab.GetByte(rcv._tab.Pos + flatbuffers.UOffsetT(5)) +} +func (rcv *XXHash) MutateH5(n byte) bool { + return rcv._tab.MutateByte(rcv._tab.Pos+flatbuffers.UOffsetT(5), n) +} + +func (rcv *XXHash) H6() byte { + return rcv._tab.GetByte(rcv._tab.Pos + flatbuffers.UOffsetT(6)) +} +func (rcv *XXHash) MutateH6(n byte) bool { + return rcv._tab.MutateByte(rcv._tab.Pos+flatbuffers.UOffsetT(6), n) +} + +func (rcv *XXHash) H7() byte { + return rcv._tab.GetByte(rcv._tab.Pos + flatbuffers.UOffsetT(7)) +} +func (rcv *XXHash) MutateH7(n byte) bool { + return rcv._tab.MutateByte(rcv._tab.Pos+flatbuffers.UOffsetT(7), n) +} + +func CreateXXHash(builder *flatbuffers.Builder, h0 byte, h1 byte, h2 byte, h3 byte, h4 byte, h5 byte, h6 byte, h7 byte) flatbuffers.UOffsetT { + builder.Prep(1, 8) + builder.PrependByte(h7) + builder.PrependByte(h6) + builder.PrependByte(h5) + builder.PrependByte(h4) + builder.PrependByte(h3) + builder.PrependByte(h2) + builder.PrependByte(h1) + builder.PrependByte(h0) + return builder.Offset() +} diff --git a/go.mod b/go.mod index b6d6eebb..26c6388f 100644 --- a/go.mod +++ b/go.mod @@ -46,6 +46,7 @@ require ( github.com/getsentry/sentry-go v0.27.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/flatbuffers v25.12.19+incompatible // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect diff --git a/go.sum b/go.sum index 041b76ad..1615d8d8 100644 --- a/go.sum +++ b/go.sum @@ -141,6 +141,8 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/flatbuffers v25.12.19+incompatible 
h1:haMV2JRRJCe1998HeW/p0X9UaMTK6SDo0ffLn2+DbLs= +github.com/google/flatbuffers v25.12.19+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= diff --git a/pkg/sbpf/interpreter.go b/pkg/sbpf/interpreter.go index dfff9f40..2d38520a 100644 --- a/pkg/sbpf/interpreter.go +++ b/pkg/sbpf/interpreter.go @@ -132,6 +132,12 @@ func (ip *Interpreter) Run() (ret uint64, cuConsumed uint64, err error) { mainLoop: for i := 0; true; i++ { // Fetch + if pc < 0 || pc >= int64(len(ip.text)) { + return 0, 0, &Exception{ + PC: pc, + Detail: fmt.Errorf("tx: %s, programId: %s - %s:", ip.txSignature, ip.programId, ExcExecutionOverrun), + } + } ins := ip.getSlot(pc) if ip.enableTracing { regsDump := fmt.Sprintf("%016x, %016x, %016x, %016x, %016x, %016x, %016x, %016x, %016x, %016x, %016x", @@ -522,6 +528,7 @@ mainLoop: } if int32(r[ins.Dst()]) == math.MinInt32 && ins.Imm() == -1 { err = ExcDivideOverflow + break } r[ins.Dst()] = uint64(uint32(int32(r[ins.Dst()]) / ins.Imm())) pc++ @@ -533,10 +540,12 @@ mainLoop: if src := int32(r[ins.Src()]); src != 0 { if int32(r[ins.Dst()]) == math.MinInt32 && src == -1 { err = ExcDivideOverflow + break } r[ins.Dst()] = uint64(uint32(int32(r[ins.Dst()]) / src)) } else { err = ExcDivideByZero + break } pc++ case OpSdiv64Imm: @@ -546,6 +555,7 @@ mainLoop: } if int64(r[ins.Dst()]) == math.MinInt64 && ins.Imm() == -1 { err = ExcDivideOverflow + break } r[ins.Dst()] = uint64(int64(r[ins.Dst()]) / int64(ins.Imm())) pc++ @@ -557,10 +567,12 @@ mainLoop: if src := int64(r[ins.Src()]); src != 0 { if int64(r[ins.Dst()]) == math.MinInt64 && src == -1 { err = ExcDivideOverflow + break } r[ins.Dst()] = uint64(int64(r[ins.Dst()]) / src) } else { err = ExcDivideByZero + break } pc++ case OpSrem32Imm: @@ -570,6 +582,7 @@ mainLoop: } if int32(r[ins.Dst()]) == math.MinInt32 && ins.Imm() == -1 { err = ExcDivideOverflow + break } r[ins.Dst()] = uint64(uint32(int32(r[ins.Dst()]) % ins.Imm())) pc++ @@ -581,10 +594,12 @@ mainLoop: if src := int32(r[ins.Src()]); src != 0 { if int32(r[ins.Dst()]) == math.MinInt32 && src == -1 { err = ExcDivideOverflow + break } r[ins.Dst()] = uint64(uint32(int32(r[ins.Dst()]) % int32(r[ins.Src()]))) } else { err = ExcDivideByZero + break } pc++ case OpSrem64Imm: @@ -594,6 +609,7 @@ mainLoop: } if int64(r[ins.Dst()]) == math.MinInt64 && ins.Imm() == -1 { err = ExcDivideOverflow + break } r[ins.Dst()] = uint64(int64(r[ins.Dst()]) % int64(ins.Imm())) pc++ @@ -605,10 +621,12 @@ mainLoop: if src := int64(r[ins.Src()]); src != 0 { if int64(r[ins.Dst()]) == math.MinInt64 && src == -1 { err = ExcDivideOverflow + break } r[ins.Dst()] = uint64(int64(r[ins.Dst()]) % int64(r[ins.Src()])) } else { err = ExcDivideByZero + break } pc++ case OpOr32Imm: @@ -779,7 +797,7 @@ mainLoop: case 64: r[ins.Dst()] &= math.MaxUint64 default: - panic("invalid le instruction") + err = ExcUnsupportedInstruction } pc++ case OpBe: @@ -791,7 +809,7 @@ mainLoop: case 64: r[ins.Dst()] = bits.ReverseBytes64(r[ins.Dst()]) default: - panic("invalid be instruction") + err = ExcUnsupportedInstruction } pc++ case OpLddw: @@ -917,6 +935,9 @@ mainLoop: case OpCall: if sc, ok := ip.syscalls(ins.Uimm()); ok { r[0], err = sc.Invoke(ip, r[1], r[2], r[3], r[4], r[5]) + if err != nil { + err = ExcSyscallError{Err: err} + } pc++ } else if target, ok := 
ip.funcs[ins.Uimm()]; ok { ok = ip.stack.Push(r[:], pc+1) @@ -936,13 +957,13 @@ mainLoop: } target &= ^(uint64(0x7)) - var ok bool - ok = ip.stack.Push(r[:], pc+1) - if !ok { - err = ExcCallDepth - } if target < ip.textVA || target >= VaddrStack || target >= ip.textVA+uint64(len(ip.text)*8) { err = NewExcBadAccess(target, 8, false, "jump out-of-bounds") + break + } + if ok := ip.stack.Push(r[:], pc+1); !ok { + err = ExcCallDepth + break } pc = int64((target - ip.textVA) / 8) case OpExit: @@ -1051,11 +1072,7 @@ func (ip *Interpreter) translateInternal(addr uint64, size uint64, write bool) ( } return unsafe.Pointer(&ip.input[lo]), nil default: - if size == 0 { - return emptySlice, nil - } else { - return nil, NewExcBadAccess(addr, size, write, "unmapped region") - } + return nil, NewExcBadAccess(addr, size, write, "unmapped region") } } diff --git a/pkg/sbpf/loader/copy.go b/pkg/sbpf/loader/copy.go index 3a23f773..b5e4a42c 100644 --- a/pkg/sbpf/loader/copy.go +++ b/pkg/sbpf/loader/copy.go @@ -35,11 +35,9 @@ func (l *Loader) getText() error { return fmt.Errorf("invalid .text: %w", err) } - if l.shText.Size%8 != 0 { - return fmt.Errorf(".text section size was not divisible by 8, got %d", l.shText.Size) - } + textSize := (l.shText.Size / 8) * 8 - l.textRange = addrRange{min: l.shText.Off, max: l.shText.Off + l.shText.Size} + l.textRange = addrRange{min: l.shText.Off, max: l.shText.Off + textSize} return nil } @@ -85,12 +83,10 @@ func (l *Loader) mapSections() error { } func (l *Loader) checkSectionAddrs(sh *elf.Section64) error { - // TODO Support true vaddr ELFs - if sh.Size > l.fileSize { return io.ErrUnexpectedEOF } - if sh.Addr != sh.Off { + if (sh.Flags&uint64(elf.SHF_ALLOC)) != 0 && sh.Addr != sh.Off { return fmt.Errorf("section physical address out-of-place") } diff --git a/pkg/sbpf/loader/loader.go b/pkg/sbpf/loader/loader.go index 916ccb73..c2cb4a4c 100644 --- a/pkg/sbpf/loader/loader.go +++ b/pkg/sbpf/loader/loader.go @@ -61,7 +61,8 @@ type Loader struct { entrypoint uint64 // program counter // Symbols - funcs map[uint32]int64 + funcs map[uint32]int64 + funcName map[uint32]int64 } // Bounds checks diff --git a/pkg/sbpf/loader/parse.go b/pkg/sbpf/loader/parse.go index 4d1767d7..bf1b87a5 100644 --- a/pkg/sbpf/loader/parse.go +++ b/pkg/sbpf/loader/parse.go @@ -9,8 +9,6 @@ import ( "math" "math/bits" "strings" - - "github.com/Overclock-Validator/mithril/pkg/sbpf/sbpfver" ) // parse checks ELF file for validity and loads metadata with minimal allocations. 
@@ -146,21 +144,15 @@ func (l *Loader) validateElfHeader() error { return fmt.Errorf("invalid ELF file") } - if l.maxSbpfVersion != sbpfver.SbpfVersionV0 { - if eh.Flags > l.maxSbpfVersion { - return fmt.Errorf("invalid sbpf version") - } - } else { - if eh.Flags == EF_SBPF_V2 { - return fmt.Errorf("invalid sbpf version") - } + if eh.Flags == EF_SBF_V2 { + return fmt.Errorf("invalid sbpf version") } - - if eh.Flags < l.minSbpfVersion { + if eh.Flags < l.minSbpfVersion || eh.Flags > l.maxSbpfVersion { return fmt.Errorf("invalid sbpf version") } - if eh.Phoff < ehLen { + phTableSize := uint64(eh.Phnum) * phEntLen + if phTableSize > 0 && eh.Phoff < ehLen { return fmt.Errorf("program header overlaps with file header") } if eh.Shoff < ehLen { @@ -295,11 +287,19 @@ func (l *Loader) getString(strtab *elf.Section64, stroff uint32, maxLen uint16) if elf.SectionType(strtab.Type) != elf.SHT_STRTAB { return "", fmt.Errorf("invalid strtab") } + if uint64(stroff) >= strtab.Size { + return "", fmt.Errorf("string offset out of bounds") + } offset := strtab.Off + uint64(stroff) - if offset > l.fileSize || offset+uint64(maxLen) > l.fileSize { + remaining := strtab.Size - uint64(stroff) + readLen := uint64(maxLen) + if readLen > remaining { + readLen = remaining + } + if offset > l.fileSize || offset+readLen > l.fileSize { return "", io.ErrUnexpectedEOF } - rd := bufio.NewReader(io.NewSectionReader(l.rd, int64(offset), int64(maxLen))) + rd := bufio.NewReader(io.NewSectionReader(l.rd, int64(offset), int64(readLen))) var builder strings.Builder for { b, err := rd.ReadByte() @@ -338,6 +338,9 @@ func (l *Loader) parseSections() error { case ".bss": return fmt.Errorf("unsupported section .bss") case ".text": + if elf.SectionType(sh.Type) == elf.SHT_NOBITS { + sh.Size = 0 + } err = setSection(&l.shText) case ".symtab": err = setSection(&l.shSymtab) @@ -372,7 +375,7 @@ func (l *Loader) parseSections() error { func (l *Loader) newDynIter() (*dynTableIter, error) { var off uint64 var size uint64 - if ph := l.phDynamic; ph != nil { + if ph := l.phDynamic; ph != nil && ph.Off%8 == 0 && (ph.Off+ph.Filesz) <= l.fileSize && (ph.Off+ph.Filesz) >= ph.Off { off, size = ph.Off, ph.Filesz } else if sh := l.shDynamic; sh != nil { off, size = sh.Off, sh.Size @@ -380,10 +383,6 @@ func (l *Loader) newDynIter() (*dynTableIter, error) { return nil, nil } - // TODO: check Agave - /*if size%dynLen != 0 { - return nil, fmt.Errorf("odd .dynamic size") - }*/ if (off+size) > l.fileSize || (off+size) < off { return nil, io.ErrUnexpectedEOF } @@ -471,7 +470,7 @@ func (l *Loader) parseRelocs() error { if overflow != 0 { return fmt.Errorf("offset underflow") } - offset, overflow = bits.Add64(offset, ph.Vaddr, 0) + offset, overflow = bits.Add64(offset, ph.Off, 0) if overflow != 0 { return fmt.Errorf("offset overflow") } @@ -548,12 +547,18 @@ func (l *Loader) validate() error { func (l *Loader) checkEntrypoint() bool { start := l.shText.Addr + entry := l.eh.Entry + if (entry-start)%8 != 0 || entry < start { + return false + } + if l.shText.Size == 0 { + return true + } end, overflow := bits.Add64(start, l.shText.Size, 0) if overflow != 0 { end = math.MaxUint64 } - entry := l.eh.Entry - return start <= entry && entry < end && (entry-start)%8 == 0 + return entry < end } type shTableIter struct { @@ -847,11 +852,16 @@ func lookupFromTable(l *Loader, section *elf.Section64, i uint32, elemSize uint1 } func (l *Loader) getDynsym(idx uint32) (elf.Sym64, error) { - // TODO is shDynsym.Off checked? 
+ if l.shDynsym == nil { + return elf.Sym64{}, fmt.Errorf("unknown symbol: no dynamic symbol table") + } return lookupFromTable(l, l.shDynsym, idx, symLen) } func (l *Loader) getDynstr(name uint32) (string, error) { + if l.shDynstr == nil { + return "", fmt.Errorf("unknown symbol: no dynamic string table") + } return l.getString(l.shDynstr, name, maxSymbolNameLen) } @@ -863,5 +873,5 @@ func isOverlap(startA uint64, sizeA uint64, startB uint64, sizeB uint64) (bool, if endA < startA || endB < startB { return false, fmt.Errorf("isOverlap: integer overflow") } - return sizeA != 0 && sizeB != 0 && (startA == startB || endA > endB), nil + return sizeA != 0 && sizeB != 0 && (startA == startB || endA > startB), nil } diff --git a/pkg/sbpf/loader/relocate.go b/pkg/sbpf/loader/relocate.go index 2430031d..997e38dd 100644 --- a/pkg/sbpf/loader/relocate.go +++ b/pkg/sbpf/loader/relocate.go @@ -11,6 +11,7 @@ import ( // relocate applies ELF relocations (for syscalls and position-independent code). func (l *Loader) relocate() error { l.funcs = make(map[uint32]int64) + l.funcName = make(map[uint32]int64) if err := l.fixupRelativeCalls(); err != nil { return err } @@ -56,14 +57,13 @@ func (l *Loader) fixupRelativeCalls() error { func (l *Loader) registerFunc(target uint64) (uint32, error) { hash := sbpf.PCHash(target) - // check for collision with syscall if l.syscalls != nil && l.syscalls.ExistsByHash(hash) { return 0, fmt.Errorf("symbol hash collision with syscall") } - //if _, ok := l.funcs[hash]; ok { - // return 0, fmt.Errorf("symbol hash collision for func at=%d hash=%#08x", target, hash) - //} + if existing, ok := l.funcs[hash]; ok && existing != int64(target) { + return 0, fmt.Errorf("symbol hash collision for func at=%d hash=%#08x", target, hash) + } l.funcs[hash] = int64(target) return hash, nil @@ -84,12 +84,15 @@ func (l *Loader) applyDynamicRelocs() error { } func (l *Loader) applyReloc(reloc *elf.Rel64) error { - // TODO rOff is not checked - // Need to have a virtual write target here rOff := reloc.Off rType := R_BPF(elf.R_TYPE64(reloc.Info)) rSym := elf.R_SYM64(reloc.Info) + progLen := uint64(len(l.program)) + if rOff > progLen || progLen-rOff < 16 { + return fmt.Errorf("relocation offset out of bounds") + } + switch rType { case R_BPF_64_64: sym, err := l.getDynsym(rSym) @@ -154,6 +157,13 @@ func (l *Loader) applyReloc(reloc *elf.Rel64) error { return fmt.Errorf("out-of-bounds R_BPF_64_32 function ref") } target := (sym.Value - l.textRange.min) / 8 + + nameHash := sbpf.SymbolHash(name) + if existing, ok := l.funcName[nameHash]; ok && existing != int64(target) { + return fmt.Errorf("symbol hash collision for %s", name) + } + l.funcName[nameHash] = int64(target) + hash, err = l.registerFunc(target) if err != nil { return fmt.Errorf("R_BPF_64_32 function ref: %w", err) diff --git a/pkg/sbpf/verifier.go b/pkg/sbpf/verifier.go index fe1e9763..009e7a17 100644 --- a/pkg/sbpf/verifier.go +++ b/pkg/sbpf/verifier.go @@ -1,6 +1,9 @@ package sbpf -import "fmt" +import ( + "fmt" + "sort" +) type Verifier struct { Program *Program @@ -31,16 +34,36 @@ const ( func (v *Verifier) VerifyProgram() error { v.buildValidationMap() - // TODO: static syscalls logic - functionStart := uint64(0) - functionNext := uint64(0) - text := v.Program.Text if len(text) == 0 { return fmt.Errorf("empty text") } + var funcStarts []int64 + if v.Program.SbpfVersion.EnableStaticSyscalls() { + for _, pc := range v.Program.Funcs { + funcStarts = append(funcStarts, pc) + } + sort.Slice(funcStarts, func(i, j int) bool { return 
funcStarts[i] < funcStarts[j] }) + } + + functionStart := int64(0) + functionNext := int64(len(text)) + funcIdx := 0 + for pc := 0; pc < len(text); pc++ { + if v.Program.SbpfVersion.EnableStaticSyscalls() { + for funcIdx < len(funcStarts) && int64(pc) >= funcStarts[funcIdx] { + functionStart = funcStarts[funcIdx] + if funcIdx+1 < len(funcStarts) { + functionNext = funcStarts[funcIdx+1] + } else { + functionNext = int64(len(text)) + } + funcIdx++ + } + } + ins := text[pc] if ins.Src() > 10 { @@ -70,7 +93,7 @@ func (v *Verifier) VerifyProgram() error { case verifyCheckJmpV3: { dst := int64(pc) + int64(ins.Off()) + 1 - if dst < int64(functionStart) || dst >= int64(functionNext) { + if dst < functionStart || dst >= functionNext { return fmt.Errorf("jump out of code") } } diff --git a/pkg/sbpf/vm.go b/pkg/sbpf/vm.go index 58d39299..255c4f6d 100644 --- a/pkg/sbpf/vm.go +++ b/pkg/sbpf/vm.go @@ -45,9 +45,9 @@ type VMOpts struct { EnableTracing bool // Execution parameters - Context any // passed to syscalls - MaxCU int - ComputeMeter *cu.ComputeMeter + Context any // passed to syscalls + MaxCU int + ComputeMeter *cu.ComputeMeter Input []byte // mapped at VaddrInput InputDataVaddr uint64 // VM address of instruction data within Input (SIMD-0321) @@ -78,6 +78,7 @@ var ( ExcInvalidInstr = errors.New("invalid instruction - feature not enabled") ExcUnsupportedInstruction = errors.New("unsupported BPF instruction") + ExcExecutionOverrun = errors.New("attempted to execute past the end of the text segment") ) type ExcBadAccess struct { @@ -107,3 +108,15 @@ type ExcCallDest struct { func (e ExcCallDest) Error() string { return fmt.Sprintf("unknown symbol or syscall 0x%08x", e.Imm) } + +type ExcSyscallError struct { + Err error +} + +func (e ExcSyscallError) Error() string { + return fmt.Sprintf("syscall error: %s", e.Err) +} + +func (e ExcSyscallError) Unwrap() error { + return e.Err +}
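
The full harness runs via the new Makefile target (`make test-conformance-elf`). For one-off debugging of a single fixture outside `go test`, the same generated accessors and loader entry points can be driven from a small standalone program. The sketch below is illustrative only, not part of this diff: it assumes the packages and call signatures used in `elf_loader_fb_test.go` above, skips the feature-gate mapping done by `parseFBFeatures` (it just uses the default feature set), and takes a `.fix` path on the command line, printing whether mithril's loader agrees with the fixture's expected outcome.

```go
// Hypothetical standalone driver (not in this PR): load one ELF loader fixture
// and compare mithril's result against the fixture's expectation.
package main

import (
	"fmt"
	"os"

	"github.com/Overclock-Validator/mithril/conformance/sealevel"
	"github.com/Overclock-Validator/mithril/pkg/features"
	"github.com/Overclock-Validator/mithril/pkg/sbpf"
	"github.com/Overclock-Validator/mithril/pkg/sbpf/loader"
	sealevelPkg "github.com/Overclock-Validator/mithril/pkg/sealevel"
)

func main() {
	data, err := os.ReadFile(os.Args[1])
	if err != nil {
		panic(err)
	}

	fixture := sealevel.GetRootAsELFLoaderFixture(data, 0)
	input := fixture.Input(nil)
	output := fixture.Output(nil)
	if input == nil || input.ElfDataBytes() == nil {
		panic("fixture has no ELF input")
	}

	// Default feature set only; the conformance test additionally enables the
	// feature gates listed in the fixture via parseFBFeatures.
	f := features.NewFeaturesDefault()

	syscalls := sbpf.SyscallRegistry(func(hash uint32) (sbpf.Syscall, bool) {
		return sealevelPkg.Syscalls(f, input.DeployChecks(), hash)
	})

	var program *sbpf.Program
	l, err := loader.NewLoaderWithSyscalls(input.ElfDataBytes(), syscalls, input.DeployChecks(), f)
	if err == nil {
		program, err = l.Load()
	}

	expectSuccess := output != nil && output.ErrCode() == 0
	fmt.Printf("fixture expects success=%v, mithril err=%v\n", expectSuccess, err)
	if err == nil && expectSuccess {
		// Compare the two observable outputs checked by the conformance test.
		fmt.Printf("entry_pc: got=%d want=%d\n", program.Entrypoint, output.EntryPc())
		fmt.Printf("text_cnt: got=%d want=%d\n", len(program.Text), output.TextCnt())
	}
}
```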