diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0305594..4337d99 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -17,7 +17,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: '1.23' + go-version: '1.24' - name: Install sqlc run: | diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..b50a942 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,153 @@ +# Copyright IBM Corp. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +# +version: "2" +linters: + enable: + - containedctx + - contextcheck + - depguard + - dupl + - errname + - errorlint + - fatcontext + - forcetypeassert + - gocognit + - goconst + - gocritic + - godot + - gosec + - intrange + - ireturn + - lll + - maintidx + - mirror + - misspell + - nilerr + - nilnesserr + - nolintlint + - prealloc + - revive + - rowserrcheck + - sqlclosecheck + - testifylint + - thelper + - unconvert + - unparam + - wastedassign + - whitespace + - goheader + settings: + depguard: + rules: + main: + deny: + - pkg: github.com/pkg/errors + desc: github.com/pkg/errors is no longer maintained + errcheck: + check-type-assertions: true + errorlint: + errorf: true + asserts: false + comparison: true + fatcontext: + check-struct-pointers: true + gocognit: + min-complexity: 15 + gosec: + excludes: + - G204 + - G404 + - G306 + govet: + disable: + - fieldalignment + enable-all: true + ireturn: + allow: + - error + - empty + - anon + - stdlib + - generic + - (or|er)$ + - T + - github.com/jackc/pgx/v5.Tx + lll: + line-length: 120 + maintidx: + under: 20 + nolintlint: + require-specific: true + revive: + enable-all-rules: true + rules: + - name: argument-limit + arguments: + - 4 + - name: line-length-limit + arguments: + - 120 + - name: file-header + disabled: true + - name: package-comments + disabled: true + - name: max-public-structs + disabled: true + - name: banned-characters + disabled: true + - name: cognitive-complexity + disabled: 
true + - name: cyclomatic + disabled: true + - name: function-length + disabled: true + - name: function-result-limit + arguments: + - 3 + - name: add-constant + disabled: true + - name: unhandled-error + arguments: + - fmt.Printf + - fmt.Println + - strings.Builder.WriteString + - strings.Builder.WriteByte + - strings.Builder.Write + - name: confusing-naming + disabled: true + - name: var-naming + disabled: true + - name: comment-spacings + arguments: + - 'nolint:' + rowserrcheck: + packages: + - github.com/jackc/pgx/v5 + goheader: + template: |- + Copyright IBM Corp. All Rights Reserved. + + SPDX-License-Identifier: Apache-2.0 +exclusions: + generated: lax + warn-unused: true + presets: + - common-false-positives +formatters: + enable: + - gofumpt + - goimports + settings: + gofumpt: + extra-rules: true + goimports: + local-prefixes: + - github.com/LF-Decentralized-Trust-labs/fabric-x-block-explorer + exclusions: + generated: lax +sort-order: + - file + - severity + - linter diff --git a/Makefile b/Makefile index b188238..5528ecf 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,51 @@ # Copyright IBM Corp. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -.PHONY: sqlc +.PHONY: sqlc lint test test-no-db test-requires-db test-all coverage clean help sqlc: ## Generate Go code from SQL using sqlc @echo "Generating Go code from SQL files..." sqlc generate @echo "✅ SQLC code generation complete" + +lint: ## Run golangci-lint + @echo "Running linter..." + golangci-lint run ./... + @echo "✅ Lint passed" + +test-no-db: ## Run tests that don't require database + @echo "Running tests without database requirement..." + go test -v -count=1 \ + ./pkg/types/... \ + ./pkg/util/... + +test-requires-db: ## Run tests that require database (uses testcontainers by default) + @echo "Running tests that require database..." + go test -v -count=1 ./pkg/db/... + +test-all: ## Run all tests + @echo "Running all tests..." + go test -v -count=1 ./pkg/... 
+ +test: test-all ## Alias for test-all + +coverage: ## Generate test coverage report + @echo "Generating coverage report..." + @mkdir -p coverage + go test -coverprofile=coverage/coverage.out ./pkg/... + go tool cover -html=coverage/coverage.out -o coverage/coverage.html + go tool cover -func=coverage/coverage.out + @echo "" + @echo "Coverage report generated: coverage/coverage.html" + +clean: ## Remove build artifacts and coverage reports + @echo "Cleaning build artifacts..." + rm -rf coverage/ + @echo "Clean complete" + +help: ## Display this help message + @echo "Fabric X Block Explorer - Makefile targets" + @echo "" + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[36m%-20s\033[0m %s\n", $$1, $$2}' + +.DEFAULT_GOAL := help diff --git a/go.mod b/go.mod index 3157fd7..be85c3e 100644 --- a/go.mod +++ b/go.mod @@ -2,10 +2,74 @@ module github.com/LF-Decentralized-Trust-labs/fabric-x-block-explorer go 1.24.0 -require github.com/jackc/pgx/v5 v5.8.0 +require ( + github.com/hyperledger/fabric v2.1.1+incompatible + github.com/jackc/pgx/v5 v5.8.0 + github.com/stretchr/testify v1.11.1 + github.com/testcontainers/testcontainers-go v0.40.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0 +) require ( + dario.cat/mergo v1.0.2 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cpuguy83/dockercfg v0.3.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker v28.5.1+incompatible // indirect + 
github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/ebitengine/purego v0.8.4 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - golang.org/x/text v0.29.0 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.10 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/go-archive v0.1.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shirou/gopsutil/v4 v4.25.6 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sykesm/zap-logfmt v0.0.4 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.39.0 // 
indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.1 // indirect + golang.org/x/crypto v0.43.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/text v0.34.0 // indirect + google.golang.org/grpc v1.79.1 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 87a6c8a..ad8a6e9 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,60 @@ +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod 
h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM= +github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/ebitengine/purego v0.8.4 
h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= +github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= +github.com/hyperledger/fabric v2.1.1+incompatible h1:cYYRv3vVg4kA6DmrixLxwn1nwBEUuYda8DsMwlaMKbY= +github.com/hyperledger/fabric v2.1.1+incompatible/go.mod h1:tGFAOCT696D3rG0Vofd2dyWYLySHlh0aQjf7Q1HAju0= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod 
h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= @@ -9,18 +63,162 @@ github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mdelapenya/tlscert v0.2.0 
h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= +github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= +github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 
h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= +github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +github.com/sykesm/zap-logfmt v0.0.4 h1:U2WzRvmIWG1wDLCFY3sz8UeEmsdHQjHFNlIdmroVFaI= +github.com/sykesm/zap-logfmt v0.0.4/go.mod h1:AuBd9xQjAe3URrWT1BBDk2v2onAZHkZkWRMiYZXiZWA= +github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU= +github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY= +github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0 h1:s2bIayFXlbDFexo96y+htn7FzuhpXLYJNnIuglNKqOk= +github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0/go.mod h1:h+u/2KoREGTnTl9UwrQ/g+XhasAT8E6dClclAADeXoQ= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools 
v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 
h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= +google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/pkg/db/db_test.go b/pkg/db/db_test.go new file mode 100644 index 0000000..c589eaa --- /dev/null +++ 
b/pkg/db/db_test.go @@ -0,0 +1,67 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package db + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestDatabaseTestEnv verifies that the test infrastructure works correctly. +func TestDatabaseTestEnv(t *testing.T) { + env := NewDatabaseTestEnv(t) + + ctx := context.Background() + err := env.Pool.Ping(ctx) + require.NoError(t, err, "database should be reachable") + + var tableExists bool + err = env.Pool.QueryRow(ctx, ` + SELECT EXISTS ( + SELECT FROM information_schema.tables + WHERE table_schema = 'public' + AND table_name = 'blocks' + ) + `).Scan(&tableExists) + require.NoError(t, err) + assert.True(t, tableExists, "blocks table should exist") +} + +// TestNewPostgres verifies the NewPostgres function creates a valid connection pool. +func TestNewPostgres(t *testing.T) { + t.Parallel() + + cfg := Config{ + Host: "localhost", + Port: 5432, + User: "postgres", + Password: "postgres", + DBName: "explorer_test", + SSLMode: "", + } + + _, err := NewPostgres(cfg) + if err != nil { + require.Contains(t, err.Error(), "failed to", "error should be connection-related") + } +} + +// TestDatabaseHelpers verifies helper methods in DatabaseTestEnv. +func TestDatabaseHelpers(t *testing.T) { + env := NewDatabaseTestEnv(t) + + blockCount := env.GetBlockCount(t) + assert.Equal(t, int64(0), blockCount, "initial block count should be zero") + + txCount := env.GetTransactionCount(t) + assert.Equal(t, int64(0), txCount, "initial transaction count should be zero") + + env.AssertBlockNotExists(t, 1) +} diff --git a/pkg/db/db_writer.go b/pkg/db/db_writer.go new file mode 100644 index 0000000..543f18f --- /dev/null +++ b/pkg/db/db_writer.go @@ -0,0 +1,244 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package db + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + + "github.com/hyperledger/fabric/common/flogging" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + + dbsqlc "github.com/LF-Decentralized-Trust-labs/fabric-x-block-explorer/pkg/db/sqlc" + "github.com/LF-Decentralized-Trust-labs/fabric-x-block-explorer/pkg/types" + "github.com/LF-Decentralized-Trust-labs/fabric-x-block-explorer/pkg/util" +) + +var logger = flogging.MustGetLogger("db") + +// BlockWriter writes processed blocks to the database. +type BlockWriter struct { + pool *pgxpool.Pool + conn *pgxpool.Conn +} + +// NewBlockWriter creates a BlockWriter backed by a connection pool. +func NewBlockWriter(pool *pgxpool.Pool) *BlockWriter { + return &BlockWriter{pool: pool} +} + +// NewBlockWriterFromConn creates a BlockWriter backed by a single connection. +func NewBlockWriterFromConn(conn *pgxpool.Conn) *BlockWriter { + return &BlockWriter{conn: conn} +} + +// WriteProcessedBlock persists a fully parsed block and all its transactions +// to the database within a single transaction. 
+func (bw *BlockWriter) WriteProcessedBlock(ctx context.Context, pb *types.ProcessedBlock) error { + if pb == nil { + return errors.New("processed block is nil") + } + + parsedData, ok := pb.Data.(*types.ParsedBlockData) + if !ok { + return errors.New("processed block Data is not *types.ParsedBlockData") + } + + tx, err := bw.beginTx(ctx) + if err != nil { + return err + } + + committed := false + defer func() { + if !committed { + _ = tx.Rollback(ctx) + } + }() + + q := dbsqlc.New(tx) + + if err = q.InsertBlock(ctx, dbsqlc.InsertBlockParams{ + BlockNum: int64(pb.BlockInfo.Number), //nolint:gosec // block numbers fit in int64 + TxCount: int32(pb.Txns), //nolint:gosec // tx count fits in int32 + PreviousHash: pb.BlockInfo.PreviousHash, + DataHash: pb.BlockInfo.DataHash, + }); err != nil { + return err + } + + p, err := buildBatchParams(parsedData) + if err != nil { + return err + } + + if err := p.flush(ctx, q); err != nil { + return err + } + + if err := tx.Commit(ctx); err != nil { + return err + } + committed = true + + logger.Debugf("db: stored block %d with %d transactions", pb.BlockInfo.Number, len(parsedData.Transactions)) + return nil +} + +// beginTx starts a new database transaction using the available connection or pool. +func (bw *BlockWriter) beginTx(ctx context.Context) (pgx.Tx, error) { + switch { + case bw.conn != nil: + return bw.conn.Begin(ctx) + case bw.pool != nil: + return bw.pool.Begin(ctx) + default: + return nil, errors.New("no pool or conn available in BlockWriter") + } +} + +// batchParams holds all the parameter slices for a block's batch inserts. 
+type batchParams struct { + txParams []dbsqlc.InsertTransactionParams + nsParams []dbsqlc.InsertTxNamespaceParams + readOnlyParams []dbsqlc.InsertReadOnlyParams + readWriteParams []dbsqlc.InsertReadWriteParams + blindWriteParams []dbsqlc.InsertBlindWriteParams + endorseParams []dbsqlc.InsertTxEndorsementParams + policyParams []dbsqlc.UpsertNamespacePolicyParams +} + +// buildBatchParams flattens all parsed block data into per-table param slices. +func buildBatchParams(data *types.ParsedBlockData) (*batchParams, error) { + p := &batchParams{ + txParams: make([]dbsqlc.InsertTransactionParams, 0, len(data.Transactions)), + } + for _, txRec := range data.Transactions { + if err := p.appendTx(txRec); err != nil { + return nil, err + } + } + for _, pol := range data.Policies { + if len(pol.PolicyJSON) == 0 { + continue + } + p.policyParams = append(p.policyParams, dbsqlc.UpsertNamespacePolicyParams{ + Namespace: pol.Namespace, + Version: int64(pol.Version), //nolint:gosec // version fits in int64 + Policy: pol.PolicyJSON, + }) + } + return p, nil +} + +// appendTx adds a transaction and all its namespace data to the param slices. +func (p *batchParams) appendTx(txRec types.TxRecord) error { + txIDBytes, err := hex.DecodeString(txRec.TxID) + if err != nil { + return fmt.Errorf("failed to decode tx_id %s: %w", txRec.TxID, err) + } + p.txParams = append(p.txParams, dbsqlc.InsertTransactionParams{ + BlockNum: int64(txRec.BlockNum), //nolint:gosec // fits in int64 + TxNum: int64(txRec.TxNum), //nolint:gosec // fits in int64 + TxID: txIDBytes, + ValidationCode: int64(txRec.ValidationCode), + }) + for _, ns := range txRec.Namespaces { + p.appendNamespace(txRec.BlockNum, txRec.TxNum, ns) + } + return nil +} + +// appendNamespace adds a single (tx, namespace) pair to the param slices. 
+func (p *batchParams) appendNamespace(blockNum, txNum uint64, ns types.TxNamespaceRecord) { + bn := int64(blockNum) //nolint:gosec // fits in int64 + tn := int64(txNum) //nolint:gosec // fits in int64 + + p.nsParams = append(p.nsParams, dbsqlc.InsertTxNamespaceParams{ + BlockNum: bn, + TxNum: tn, + NsID: ns.NsID, + NsVersion: int64(ns.NsVersion), //nolint:gosec // fits in int64 + }) + for _, r := range ns.ReadsOnly { + p.readOnlyParams = append(p.readOnlyParams, dbsqlc.InsertReadOnlyParams{ + BlockNum: bn, + TxNum: tn, + NsID: ns.NsID, + Key: []byte(r.Key), + Version: util.PtrToNullableInt64(r.Version), + }) + } + for _, rw := range ns.ReadWrites { + p.readWriteParams = append(p.readWriteParams, dbsqlc.InsertReadWriteParams{ + BlockNum: bn, + TxNum: tn, + NsID: ns.NsID, + Key: []byte(rw.Key), + ReadVersion: util.PtrToNullableInt64(rw.ReadVersion), + Value: rw.Value, + }) + } + for _, bw := range ns.BlindWrites { + p.blindWriteParams = append(p.blindWriteParams, dbsqlc.InsertBlindWriteParams{ + BlockNum: bn, + TxNum: tn, + NsID: ns.NsID, + Key: []byte(bw.Key), + Value: bw.Value, + }) + } + for _, e := range ns.Endorsements { + p.endorseParams = append(p.endorseParams, dbsqlc.InsertTxEndorsementParams{ + BlockNum: bn, + TxNum: tn, + NsID: ns.NsID, + Endorsement: e.Endorsement, + MspID: util.PtrToNullableString(e.MspID), + Identity: e.Identity, + }) + } +} + +// flush executes all batch inserts, skipping any empty param slices. 
+func (p *batchParams) flush(ctx context.Context, q *dbsqlc.Queries) error { + type entry struct { + n int + f func() error + } + for _, e := range []entry{ + {len(p.txParams), func() error { return execBatch(q.InsertTransaction(ctx, p.txParams).Exec) }}, + {len(p.nsParams), func() error { return execBatch(q.InsertTxNamespace(ctx, p.nsParams).Exec) }}, + {len(p.readOnlyParams), func() error { return execBatch(q.InsertReadOnly(ctx, p.readOnlyParams).Exec) }}, + {len(p.readWriteParams), func() error { return execBatch(q.InsertReadWrite(ctx, p.readWriteParams).Exec) }}, + {len(p.blindWriteParams), func() error { return execBatch(q.InsertBlindWrite(ctx, p.blindWriteParams).Exec) }}, + {len(p.endorseParams), func() error { return execBatch(q.InsertTxEndorsement(ctx, p.endorseParams).Exec) }}, + {len(p.policyParams), func() error { return execBatch(q.UpsertNamespacePolicy(ctx, p.policyParams).Exec) }}, + } { + if e.n == 0 { + continue + } + if err := e.f(); err != nil { + return err + } + } + return nil +} + +// execBatch drains a batchexec result set and returns the first error encountered. +func execBatch(exec func(func(int, error))) error { + var batchErr error + exec(func(_ int, err error) { + if err != nil && batchErr == nil { + batchErr = err + } + }) + return batchErr +} diff --git a/pkg/db/db_writer_test.go b/pkg/db/db_writer_test.go new file mode 100644 index 0000000..80fbe59 --- /dev/null +++ b/pkg/db/db_writer_test.go @@ -0,0 +1,397 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package db + +import ( + "context" + "encoding/hex" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/LF-Decentralized-Trust-labs/fabric-x-block-explorer/pkg/types" +) + +// TestWriteProcessedBlock tests writing a complete block with all components. 
func TestWriteProcessedBlock(t *testing.T) {
	env := NewDatabaseTestEnv(t)
	ctx := context.Background()

	txID := "abc123def456"
	txIDBytes, err := hex.DecodeString(txID)
	require.NoError(t, err)

	// A single transaction touching one namespace, with a read-write and an
	// endorsement, plus one namespace policy.
	parsedData := &types.ParsedBlockData{
		Transactions: []types.TxRecord{
			{
				BlockNum:       1,
				TxNum:          0,
				TxID:           txID,
				ValidationCode: 0,
				Namespaces: []types.TxNamespaceRecord{
					{
						NsID:      "mycc",
						NsVersion: 1,
						ReadWrites: []types.ReadWriteRecord{
							{Key: "key1", ReadVersion: uint64Ptr(10), Value: []byte("value1")},
						},
						Endorsements: []types.EndorsementRecord{
							{
								Endorsement: []byte("endorsement_sig"),
								MspID:       strPtr("Org1MSP"),
								Identity:    []byte(`{"mspid":"Org1MSP","id_bytes":"cert"}`),
							},
						},
					},
				},
			},
		},
		Policies: []types.NamespacePolicyRecord{
			{
				Namespace:  "mycc",
				Version:    1,
				PolicyJSON: json.RawMessage(`{"policy_bytes":"cG9saWN5"}`),
			},
		},
	}

	processedBlock := &types.ProcessedBlock{
		BlockInfo: &types.BlockInfo{
			Number:       1,
			PreviousHash: []byte("prevhash"),
			DataHash:     []byte("datahash"),
		},
		Data: parsedData,
		Txns: 1,
	}

	writer := NewBlockWriter(env.Pool)
	err = writer.WriteProcessedBlock(ctx, processedBlock)
	require.NoError(t, err)

	// Verify the block row round-trips exactly.
	block, err := env.Queries.GetBlock(ctx, 1)
	require.NoError(t, err)
	assert.Equal(t, int64(1), block.BlockNum)
	assert.Equal(t, int32(1), block.TxCount)
	assert.Equal(t, []byte("prevhash"), block.PreviousHash)
	assert.Equal(t, []byte("datahash"), block.DataHash)

	// Verify the transaction is addressable by its (binary) tx_id.
	tx, err := env.Queries.GetValidationCodeByTxID(ctx, txIDBytes)
	require.NoError(t, err)
	assert.Equal(t, int64(1), tx.BlockNum)
	assert.Equal(t, int64(0), tx.TxNum)

	assert.Equal(t, int64(1), env.GetBlockCount(t))
	assert.Equal(t, int64(1), env.GetTransactionCount(t))
}

// TestWriteProcessedBlockWithBlindWrites tests writing blind writes.
func TestWriteProcessedBlockWithBlindWrites(t *testing.T) {
	env := NewDatabaseTestEnv(t)
	ctx := context.Background()

	txID := "deadbeef"
	parsedData := &types.ParsedBlockData{
		Transactions: []types.TxRecord{
			{
				BlockNum:       2,
				TxNum:          0,
				TxID:           txID,
				ValidationCode: 0,
				Namespaces: []types.TxNamespaceRecord{
					{
						NsID:      "testcc",
						NsVersion: 1,
						BlindWrites: []types.BlindWriteRecord{
							// Blind writes have no read version
							{Key: "blindkey", Value: []byte("blindvalue")},
						},
					},
				},
			},
		},
	}

	processedBlock := &types.ProcessedBlock{
		BlockInfo: &types.BlockInfo{
			Number:       2,
			PreviousHash: []byte("prev2"),
			DataHash:     []byte("data2"),
		},
		Data: parsedData,
		Txns: 1,
	}

	writer := NewBlockWriter(env.Pool)
	err := writer.WriteProcessedBlock(ctx, processedBlock)
	require.NoError(t, err)

	env.AssertBlockExists(t, 2)

	// Confirm the blind write landed in its dedicated table.
	var key []byte
	err = env.Pool.QueryRow(ctx, `
		SELECT key
		FROM tx_blind_writes
		WHERE ns_id = $1 AND key = $2
	`, "testcc", []byte("blindkey")).Scan(&key)
	require.NoError(t, err)
	assert.Equal(t, []byte("blindkey"), key)
}

// TestWriteProcessedBlockMultipleTransactions tests multiple transactions in one block.
func TestWriteProcessedBlockMultipleTransactions(t *testing.T) {
	env := NewDatabaseTestEnv(t)
	ctx := context.Background()

	// Two transactions, each writing to a different chaincode namespace.
	parsedData := &types.ParsedBlockData{
		Transactions: []types.TxRecord{
			{
				BlockNum:       3,
				TxNum:          0,
				TxID:           "0000000000000000000000000000000000000000000000000000000000000001",
				ValidationCode: 0,
				Namespaces: []types.TxNamespaceRecord{
					{
						NsID:      "cc1",
						NsVersion: 1,
						BlindWrites: []types.BlindWriteRecord{
							{Key: "key1", Value: []byte("val1")},
						},
					},
				},
			},
			{
				BlockNum:       3,
				TxNum:          1,
				TxID:           "0000000000000000000000000000000000000000000000000000000000000002",
				ValidationCode: 0,
				Namespaces: []types.TxNamespaceRecord{
					{
						NsID:      "cc2",
						NsVersion: 1,
						BlindWrites: []types.BlindWriteRecord{
							{Key: "key2", Value: []byte("val2")},
						},
					},
				},
			},
		},
	}

	processedBlock := &types.ProcessedBlock{
		BlockInfo: &types.BlockInfo{
			Number:       3,
			PreviousHash: []byte("prev3"),
			DataHash:     []byte("data3"),
		},
		Data: parsedData,
		Txns: 2,
	}

	writer := NewBlockWriter(env.Pool)
	err := writer.WriteProcessedBlock(ctx, processedBlock)
	require.NoError(t, err)

	assert.Equal(t, int64(2), env.GetTransactionCount(t))

	// One tx_namespaces row per (tx, namespace) pair.
	var count int64
	err = env.Pool.QueryRow(ctx, "SELECT COUNT(*) FROM tx_namespaces").Scan(&count)
	require.NoError(t, err)
	assert.Equal(t, int64(2), count)
}

// TestWriteProcessedBlockNilBlock tests error handling for nil block.
func TestWriteProcessedBlockNilBlock(t *testing.T) {
	env := NewDatabaseTestEnv(t)
	ctx := context.Background()

	writer := NewBlockWriter(env.Pool)
	err := writer.WriteProcessedBlock(ctx, nil)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "nil")
}

// TestWriteProcessedBlockInvalidData tests error handling for invalid data type.
func TestWriteProcessedBlockInvalidData(t *testing.T) {
	env := NewDatabaseTestEnv(t)
	ctx := context.Background()

	processedBlock := &types.ProcessedBlock{
		BlockInfo: &types.BlockInfo{
			Number: 1,
		},
		Data: "invalid_data_type", // Wrong type
		Txns: 0,
	}

	writer := NewBlockWriter(env.Pool)
	err := writer.WriteProcessedBlock(ctx, processedBlock)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "not *types.ParsedBlockData")
}

// TestWriteProcessedBlockWithPolicies tests policy upsert functionality.
func TestWriteProcessedBlockWithPolicies(t *testing.T) {
	env := NewDatabaseTestEnv(t)
	ctx := context.Background()

	policyJSON := json.RawMessage(`{"policy_bytes":"base64encodedpolicy"}`)

	// First block carries policy version 1 for namespace "mycc".
	parsedData := &types.ParsedBlockData{
		Transactions: []types.TxRecord{},
		Policies: []types.NamespacePolicyRecord{
			{
				Namespace:  "mycc",
				Version:    1,
				PolicyJSON: policyJSON,
			},
		},
	}

	processedBlock := &types.ProcessedBlock{
		BlockInfo: &types.BlockInfo{
			Number:       4,
			PreviousHash: []byte("prev4"),
			DataHash:     []byte("data4"),
		},
		Data: parsedData,
		Txns: 0,
	}

	writer := NewBlockWriter(env.Pool)
	err := writer.WriteProcessedBlock(ctx, processedBlock)
	require.NoError(t, err)

	policies, err := env.Queries.GetNamespacePolicies(ctx, "mycc")
	require.NoError(t, err)
	assert.Len(t, policies, 1)
	assert.Equal(t, "mycc", policies[0].Namespace)
	assert.Equal(t, int64(1), policies[0].Version)

	// A later block with version 2 must add a second row rather than
	// replacing version 1 (upsert conflicts only on identical versions).
	parsedData2 := &types.ParsedBlockData{
		Transactions: []types.TxRecord{},
		Policies: []types.NamespacePolicyRecord{
			{
				Namespace:  "mycc",
				Version:    2,
				PolicyJSON: json.RawMessage(`{"policy_bytes":"updated"}`),
			},
		},
	}

	processedBlock2 := &types.ProcessedBlock{
		BlockInfo: &types.BlockInfo{
			Number:       5,
			PreviousHash: []byte("prev5"),
			DataHash:     []byte("data5"),
		},
		Data: parsedData2,
		Txns: 0,
	}

	err = writer.WriteProcessedBlock(ctx, processedBlock2)
	require.NoError(t, err)

	policies, err = env.Queries.GetNamespacePolicies(ctx, "mycc")
	require.NoError(t, err)
	assert.Len(t, policies, 2)
}

// TestWriteProcessedBlockRollbackOnError tests transaction rollback on error.
func TestWriteProcessedBlockRollbackOnError(t *testing.T) {
	env := NewDatabaseTestEnv(t)
	ctx := context.Background()

	parsedData := &types.ParsedBlockData{
		Transactions: []types.TxRecord{
			{
				BlockNum:       6,
				TxNum:          0,
				TxID:           "invalid_hex_ZZZ", // Invalid hex string
				ValidationCode: 0,
				Namespaces:     []types.TxNamespaceRecord{},
			},
		},
	}

	processedBlock := &types.ProcessedBlock{
		BlockInfo: &types.BlockInfo{
			Number:       6,
			PreviousHash: []byte("prev6"),
			DataHash:     []byte("data6"),
		},
		Data: parsedData,
		Txns: 1,
	}

	writer := NewBlockWriter(env.Pool)
	err := writer.WriteProcessedBlock(ctx, processedBlock)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "failed to decode tx_id")

	// The block insert ran before the decode failure, so its absence proves
	// the surrounding transaction was rolled back.
	env.AssertBlockNotExists(t, 6)
}

// TestNewBlockWriter tests BlockWriter constructors.
func TestNewBlockWriter(t *testing.T) {
	env := NewDatabaseTestEnv(t)

	writer1 := NewBlockWriter(env.Pool)
	assert.NotNil(t, writer1)
	assert.NotNil(t, writer1.pool)
	assert.Nil(t, writer1.conn)

	ctx := context.Background()
	conn, err := env.Pool.Acquire(ctx)
	require.NoError(t, err)
	defer conn.Release()

	writer2 := NewBlockWriterFromConn(conn)
	assert.NotNil(t, writer2)
	assert.NotNil(t, writer2.conn)
	assert.Nil(t, writer2.pool)
}

// TestWriteProcessedBlockEmptyComponents tests writing block with empty slices.
func TestWriteProcessedBlockEmptyComponents(t *testing.T) {
	env := NewDatabaseTestEnv(t)
	ctx := context.Background()

	// A block with no transactions and no policies must still be recorded.
	parsedData := &types.ParsedBlockData{
		Transactions: []types.TxRecord{},
		Policies:     []types.NamespacePolicyRecord{},
	}

	processedBlock := &types.ProcessedBlock{
		BlockInfo: &types.BlockInfo{
			Number:       7,
			PreviousHash: []byte("prev7"),
			DataHash:     []byte("data7"),
		},
		Data: parsedData,
		Txns: 0,
	}

	writer := NewBlockWriter(env.Pool)
	err := writer.WriteProcessedBlock(ctx, processedBlock)
	require.NoError(t, err)

	env.AssertBlockExists(t, 7)
	assert.Equal(t, int64(0), env.GetTransactionCount(t))
}

// Helper functions

// uint64Ptr returns a pointer to v, for optional uint64 fields in fixtures.
func uint64Ptr(v uint64) *uint64 {
	return &v
}

// strPtr returns a pointer to s, for optional string fields in fixtures.
func strPtr(s string) *string {
	return &s
}

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package dbtest

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/stretchr/testify/require"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
)

const (
	// PostgreSQL configuration.
	testDBName     = "explorer_test"
	testDBUser     = "postgres"
	testDBPassword = "postgres"
)

// TestContainer holds the PostgreSQL testcontainer instance.
type TestContainer struct {
	Container *postgres.PostgresContainer
	Pool      *pgxpool.Pool
	DSN       string
}

// PrepareTestEnv sets up a PostgreSQL testcontainer for testing.
// It checks the DB_DEPLOYMENT environment variable:
//   - If set to "local", it connects to a local PostgreSQL instance
//   - Otherwise, it spins up a new testcontainer.
func PrepareTestEnv(t *testing.T) *TestContainer {
	t.Helper()

	ctx := context.Background()

	// DB_DEPLOYMENT=local avoids the container startup cost when a postgres
	// server is already running on localhost:5432.
	if os.Getenv("DB_DEPLOYMENT") == "local" {
		return prepareLocalDB(ctx, t)
	}

	return prepareTestContainer(ctx, t)
}

// prepareLocalDB connects to local postgres.
func prepareLocalDB(ctx context.Context, t *testing.T) *TestContainer {
	t.Helper()

	dsn := fmt.Sprintf(
		"postgres://%s:%s@localhost:5432/%s?sslmode=disable",
		testDBUser,
		testDBPassword,
		testDBName,
	)

	pool, err := pgxpool.New(ctx, dsn)
	require.NoError(t, err, "failed to connect to local database")

	err = pool.Ping(ctx)
	require.NoError(t, err, "failed to ping local database")

	// Clean all tables before each test
	cleanDatabase(ctx, t, pool)

	return &TestContainer{
		Container: nil, // no container when using local
		Pool:      pool,
		DSN:       dsn,
	}
}

// cleanDatabase drops all tables for a clean test state.
// NOTE(review): this drops the schema rather than truncating; it assumes the
// caller re-applies schema.sql before running queries — confirm against the
// test env setup.
func cleanDatabase(ctx context.Context, t *testing.T, pool *pgxpool.Pool) {
	t.Helper()

	// Drop in dependency order (children first) with CASCADE as a backstop.
	_, err := pool.Exec(ctx, `
		DROP TABLE IF EXISTS tx_endorsements CASCADE;
		DROP TABLE IF EXISTS tx_blind_writes CASCADE;
		DROP TABLE IF EXISTS tx_read_writes CASCADE;
		DROP TABLE IF EXISTS tx_reads_only CASCADE;
		DROP TABLE IF EXISTS tx_namespaces CASCADE;
		DROP TABLE IF EXISTS transactions CASCADE;
		DROP TABLE IF EXISTS namespace_policies CASCADE;
		DROP TABLE IF EXISTS blocks CASCADE;
	`)
	require.NoError(t, err, "failed to clean database")
}

// prepareTestContainer creates a PostgreSQL testcontainer.
func prepareTestContainer(ctx context.Context, t *testing.T) *TestContainer {
	t.Helper()

	// Postgres logs the ready message twice (once during init, once on final
	// start), hence WithOccurrence(2).
	postgresContainer, err := postgres.Run(ctx,
		"postgres:14-alpine",
		postgres.WithDatabase(testDBName),
		postgres.WithUsername(testDBUser),
		postgres.WithPassword(testDBPassword),
		testcontainers.WithWaitStrategy(
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(60*time.Second),
		),
	)
	require.NoError(t, err, "failed to start postgres container")

	dsn, err := postgresContainer.ConnectionString(ctx, "sslmode=disable")
	require.NoError(t, err, "failed to get connection string")

	pool, err := pgxpool.New(ctx, dsn)
	require.NoError(t, err, "failed to create connection pool")

	err = pool.Ping(ctx)
	require.NoError(t, err, "failed to ping database")

	return &TestContainer{
		Container: postgresContainer,
		Pool:      pool,
		DSN:       dsn,
	}
}

// Close cleans up the test database resources.
func (tc *TestContainer) Close(t *testing.T) {
	t.Helper()

	if tc.Pool != nil {
		tc.Pool.Close()
	}

	if tc.Container != nil {
		ctx := context.Background()
		err := tc.Container.Terminate(ctx)
		require.NoError(t, err, "failed to terminate container")
	}
}

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package db

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5/pgxpool"
)

// Config holds PostgreSQL connection configuration.
type Config struct {
	Host     string
	Port     int
	User     string
	Password string //nolint:gosec // intentional: field holds a connection credential
	DBName   string
	SSLMode  string
}

// NewPostgres creates a new pgx connection pool using the given config.
+func NewPostgres(cfg Config) (*pgxpool.Pool, error) { + if cfg.SSLMode == "" { + cfg.SSLMode = "disable" + } + + dsn := fmt.Sprintf( + "postgres://%s:%s@%s:%d/%s?sslmode=%s&pool_max_conns=20", + cfg.User, + cfg.Password, + cfg.Host, + cfg.Port, + cfg.DBName, + cfg.SSLMode, + ) + + pool, err := pgxpool.New(context.Background(), dsn) + if err != nil { + return nil, fmt.Errorf("failed to create pgx pool: %w", err) + } + + if err := pool.Ping(context.Background()); err != nil { + pool.Close() + return nil, fmt.Errorf("failed to connect postgres: %w", err) + } + + return pool, nil +} diff --git a/pkg/db/queries/namespace_policies.sql b/pkg/db/queries/namespace_policies.sql index 4751a52..0b32cf1 100644 --- a/pkg/db/queries/namespace_policies.sql +++ b/pkg/db/queries/namespace_policies.sql @@ -1,10 +1,10 @@ --- name: UpsertNamespacePolicy :exec +-- name: UpsertNamespacePolicy :batchexec INSERT INTO namespace_policies (namespace, version, policy) VALUES ($1, $2, $3) ON CONFLICT (namespace, version) DO UPDATE SET policy = EXCLUDED.policy; -- name: GetNamespacePolicies :many -SELECT id, namespace, version, policy +SELECT namespace, version, policy FROM namespace_policies WHERE namespace = $1 ORDER BY version DESC; diff --git a/pkg/db/queries/transactions.sql b/pkg/db/queries/transactions.sql index 6878f87..23d439f 100644 --- a/pkg/db/queries/transactions.sql +++ b/pkg/db/queries/transactions.sql @@ -1,36 +1,21 @@ --- name: InsertTransaction :one +-- name: InsertTransaction :batchexec INSERT INTO transactions (block_num, tx_num, tx_id, validation_code) VALUES ($1, $2, $3, $4) -ON CONFLICT (block_num, tx_num) DO UPDATE SET tx_id = EXCLUDED.tx_id -RETURNING id; +ON CONFLICT (block_num, tx_num) DO UPDATE SET tx_id = EXCLUDED.tx_id; -- name: GetValidationCodeByBlock :many -SELECT id, block_num, tx_num, tx_id, validation_code +SELECT block_num, tx_num, tx_id, validation_code FROM transactions WHERE block_num = $1 ORDER BY tx_num LIMIT $2 OFFSET $3; -- name: 
GetValidationCodeByTxID :one -SELECT id, block_num, tx_num, tx_id, validation_code +SELECT block_num, tx_num, tx_id, validation_code FROM transactions WHERE tx_id = $1; --- name: GetTransactionID :one -SELECT id -FROM transactions -WHERE block_num = $1 AND tx_num = $2; - --- name: InsertTxNamespace :one -INSERT INTO tx_namespaces (transaction_id, ns_id, ns_version) -VALUES ($1, $2, $3) -ON CONFLICT (transaction_id, ns_id) DO UPDATE SET ns_version = EXCLUDED.ns_version -RETURNING id; - --- name: InsertTxRead :exec -INSERT INTO tx_reads (tx_namespace_id, key, version, is_read_write) -VALUES ($1, $2, $3, $4); - --- name: InsertTxWrite :exec -INSERT INTO tx_writes (tx_namespace_id, key, value, is_blind_write, read_version) -VALUES ($1, $2, $3, $4, $5); +-- name: InsertTxNamespace :batchexec +INSERT INTO tx_namespaces (block_num, tx_num, ns_id, ns_version) +VALUES ($1, $2, $3, $4) +ON CONFLICT (block_num, tx_num, ns_id) DO UPDATE SET ns_version = EXCLUDED.ns_version; diff --git a/pkg/db/queries/tx_blind_writes.sql b/pkg/db/queries/tx_blind_writes.sql new file mode 100644 index 0000000..0c10a16 --- /dev/null +++ b/pkg/db/queries/tx_blind_writes.sql @@ -0,0 +1,10 @@ +-- name: InsertBlindWrite :batchexec +INSERT INTO tx_blind_writes (block_num, tx_num, ns_id, key, value) +VALUES ($1, $2, $3, $4, $5); + +-- name: GetBlindWritesByTx :many +SELECT ns_id, key, value +FROM tx_blind_writes +WHERE block_num = $1 AND tx_num = $2 +ORDER BY ns_id, key +LIMIT $3 OFFSET $4; diff --git a/pkg/db/queries/tx_endorsements.sql b/pkg/db/queries/tx_endorsements.sql index 6a60d0d..337fc0e 100644 --- a/pkg/db/queries/tx_endorsements.sql +++ b/pkg/db/queries/tx_endorsements.sql @@ -1,11 +1,9 @@ --- name: InsertTxEndorsement :exec -INSERT INTO tx_endorsements (tx_namespace_id, endorsement, msp_id, identity) -VALUES ($1, $2, $3, $4); +-- name: InsertTxEndorsement :batchexec +INSERT INTO tx_endorsements (block_num, tx_num, ns_id, endorsement, msp_id, identity) +VALUES ($1, $2, $3, $4, $5, $6); -- 
name: GetEndorsementsByTx :many -SELECT te.id, te.endorsement, te.msp_id, te.identity, tn.ns_id -FROM tx_endorsements te -JOIN tx_namespaces tn ON te.tx_namespace_id = tn.id -JOIN transactions t ON tn.transaction_id = t.id -WHERE t.block_num = $1 AND t.tx_num = $2 -ORDER BY te.id; +SELECT ns_id, endorsement, msp_id, identity +FROM tx_endorsements +WHERE block_num = $1 AND tx_num = $2 +ORDER BY ns_id, endorsement; diff --git a/pkg/db/queries/tx_read_writes.sql b/pkg/db/queries/tx_read_writes.sql new file mode 100644 index 0000000..e480aca --- /dev/null +++ b/pkg/db/queries/tx_read_writes.sql @@ -0,0 +1,9 @@ +-- name: InsertReadWrite :batchexec +INSERT INTO tx_read_writes (block_num, tx_num, ns_id, key, read_version, value) +VALUES ($1, $2, $3, $4, $5, $6); + +-- name: GetReadWritesByTx :many +SELECT ns_id, key, read_version, value +FROM tx_read_writes +WHERE block_num = $1 AND tx_num = $2 +ORDER BY ns_id, key; diff --git a/pkg/db/queries/tx_reads.sql b/pkg/db/queries/tx_reads.sql deleted file mode 100644 index b3daa8e..0000000 --- a/pkg/db/queries/tx_reads.sql +++ /dev/null @@ -1,7 +0,0 @@ --- name: GetReadsByTx :many -SELECT tr.id, tr.key, tr.version, tr.is_read_write, tn.ns_id -FROM tx_reads tr -JOIN tx_namespaces tn ON tr.tx_namespace_id = tn.id -JOIN transactions t ON tn.transaction_id = t.id -WHERE t.block_num = $1 AND t.tx_num = $2 -ORDER BY tr.id; diff --git a/pkg/db/queries/tx_reads_only.sql b/pkg/db/queries/tx_reads_only.sql new file mode 100644 index 0000000..6e48072 --- /dev/null +++ b/pkg/db/queries/tx_reads_only.sql @@ -0,0 +1,9 @@ +-- name: InsertReadOnly :batchexec +INSERT INTO tx_reads_only (block_num, tx_num, ns_id, key, version) +VALUES ($1, $2, $3, $4, $5); + +-- name: GetReadsOnlyByTx :many +SELECT ns_id, key, version +FROM tx_reads_only +WHERE block_num = $1 AND tx_num = $2 +ORDER BY ns_id, key; diff --git a/pkg/db/queries/tx_writes.sql b/pkg/db/queries/tx_writes.sql deleted file mode 100644 index ba94d70..0000000 --- 
a/pkg/db/queries/tx_writes.sql +++ /dev/null @@ -1,14 +0,0 @@ --- name: GetWritesByTx :many -SELECT - tw.id, - tw.key, - tw.value, - tw.is_blind_write, - tw.read_version, - tn.ns_id -FROM tx_writes tw -JOIN tx_namespaces tn ON tw.tx_namespace_id = tn.id -JOIN transactions t ON tn.transaction_id = t.id -WHERE t.block_num = $1 AND t.tx_num = $2 -ORDER BY tw.id -LIMIT $3 OFFSET $4; diff --git a/pkg/db/schema.sql b/pkg/db/schema.sql index 88db7c3..12044b2 100644 --- a/pkg/db/schema.sql +++ b/pkg/db/schema.sql @@ -6,59 +6,79 @@ CREATE TABLE IF NOT EXISTS blocks ( ); CREATE TABLE IF NOT EXISTS transactions ( - id BIGSERIAL PRIMARY KEY, block_num BIGINT NOT NULL REFERENCES blocks(block_num), - tx_num BIGINT NOT NULL, - tx_id BYTEA NOT NULL, - validation_code BIGINT NOT NULL, - UNIQUE (block_num, tx_num) + tx_num BIGINT NOT NULL, + tx_id BYTEA NOT NULL, + validation_code BIGINT NOT NULL, + PRIMARY KEY (block_num, tx_num) ); CREATE TABLE IF NOT EXISTS tx_namespaces ( - id BIGSERIAL PRIMARY KEY, - transaction_id BIGINT NOT NULL REFERENCES transactions(id), - ns_id TEXT NOT NULL, + block_num BIGINT NOT NULL, + tx_num BIGINT NOT NULL, + ns_id TEXT NOT NULL, ns_version BIGINT NOT NULL, - UNIQUE (transaction_id, ns_id) + PRIMARY KEY (block_num, tx_num, ns_id), + FOREIGN KEY (block_num, tx_num) REFERENCES transactions(block_num, tx_num) ); -CREATE TABLE IF NOT EXISTS tx_reads ( - id BIGSERIAL PRIMARY KEY, - tx_namespace_id BIGINT NOT NULL REFERENCES tx_namespaces(id), - key BYTEA NOT NULL, - version BIGINT, - is_read_write BOOLEAN NOT NULL DEFAULT FALSE +-- Keys that were only read (no write). From ns.ReadsOnly in the block. 
+CREATE TABLE IF NOT EXISTS tx_reads_only ( + block_num BIGINT NOT NULL, + tx_num BIGINT NOT NULL, + ns_id TEXT NOT NULL, + key BYTEA NOT NULL, + version BIGINT, + PRIMARY KEY (block_num, tx_num, ns_id, key), + FOREIGN KEY (block_num, tx_num, ns_id) REFERENCES tx_namespaces(block_num, tx_num, ns_id) ); -CREATE TABLE IF NOT EXISTS tx_writes ( - id BIGSERIAL PRIMARY KEY, - tx_namespace_id BIGINT NOT NULL REFERENCES tx_namespaces(id), - key BYTEA NOT NULL, - value BYTEA, - is_blind_write BOOLEAN NOT NULL DEFAULT FALSE, - read_version BIGINT +-- Keys that were both read and written. From ns.ReadWrites in the block. +CREATE TABLE IF NOT EXISTS tx_read_writes ( + block_num BIGINT NOT NULL, + tx_num BIGINT NOT NULL, + ns_id TEXT NOT NULL, + key BYTEA NOT NULL, + read_version BIGINT, + value BYTEA, + PRIMARY KEY (block_num, tx_num, ns_id, key), + FOREIGN KEY (block_num, tx_num, ns_id) REFERENCES tx_namespaces(block_num, tx_num, ns_id) +); + +-- Keys that were written without a prior read. From ns.BlindWrites in the block. 
+CREATE TABLE IF NOT EXISTS tx_blind_writes ( + block_num BIGINT NOT NULL, + tx_num BIGINT NOT NULL, + ns_id TEXT NOT NULL, + key BYTEA NOT NULL, + value BYTEA, + PRIMARY KEY (block_num, tx_num, ns_id, key), + FOREIGN KEY (block_num, tx_num, ns_id) REFERENCES tx_namespaces(block_num, tx_num, ns_id) ); CREATE TABLE IF NOT EXISTS tx_endorsements ( - id BIGSERIAL PRIMARY KEY, - tx_namespace_id BIGINT NOT NULL REFERENCES tx_namespaces(id), - endorsement BYTEA NOT NULL, - msp_id TEXT, - identity JSONB + block_num BIGINT NOT NULL, + tx_num BIGINT NOT NULL, + ns_id TEXT NOT NULL, + endorsement BYTEA NOT NULL, + msp_id TEXT, + identity JSONB, + PRIMARY KEY (block_num, tx_num, ns_id, endorsement), + FOREIGN KEY (block_num, tx_num, ns_id) REFERENCES tx_namespaces(block_num, tx_num, ns_id) ); CREATE TABLE IF NOT EXISTS namespace_policies ( - id BIGSERIAL PRIMARY KEY, - namespace TEXT NOT NULL, - version BIGINT NOT NULL, - policy JSONB, - UNIQUE (namespace, version) + namespace TEXT NOT NULL, + version BIGINT NOT NULL, + policy JSONB, + PRIMARY KEY (namespace, version) ); --- Indexes for foreign key columns to improve JOIN performance -CREATE INDEX IF NOT EXISTS idx_transactions_block_num ON transactions(block_num); -CREATE INDEX IF NOT EXISTS idx_tx_namespaces_transaction_id ON tx_namespaces(transaction_id); -CREATE INDEX IF NOT EXISTS idx_tx_reads_tx_namespace_id ON tx_reads(tx_namespace_id); -CREATE INDEX IF NOT EXISTS idx_tx_writes_tx_namespace_id ON tx_writes(tx_namespace_id); -CREATE INDEX IF NOT EXISTS idx_tx_endorsements_tx_namespace_id ON tx_endorsements(tx_namespace_id); -CREATE INDEX IF NOT EXISTS idx_namespace_policies_namespace ON namespace_policies(namespace); +-- Indexes to improve lookup performance. 
+CREATE INDEX IF NOT EXISTS idx_transactions_block_num ON transactions(block_num); +CREATE INDEX IF NOT EXISTS idx_tx_namespaces_block_tx ON tx_namespaces(block_num, tx_num); +CREATE INDEX IF NOT EXISTS idx_tx_reads_only_block_tx_ns ON tx_reads_only(block_num, tx_num, ns_id); +CREATE INDEX IF NOT EXISTS idx_tx_read_writes_block_tx_ns ON tx_read_writes(block_num, tx_num, ns_id); +CREATE INDEX IF NOT EXISTS idx_tx_blind_writes_block_tx_ns ON tx_blind_writes(block_num, tx_num, ns_id); +CREATE INDEX IF NOT EXISTS idx_tx_endorsements_block_tx_ns ON tx_endorsements(block_num, tx_num, ns_id); +CREATE INDEX IF NOT EXISTS idx_namespace_policies_namespace ON namespace_policies(namespace); diff --git a/pkg/db/sqlc/batch.go b/pkg/db/sqlc/batch.go new file mode 100644 index 0000000..9931ef1 --- /dev/null +++ b/pkg/db/sqlc/batch.go @@ -0,0 +1,409 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.30.0 +// source: batch.go + +package dbsqlc + +import ( + "context" + "errors" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" +) + +var ( + ErrBatchAlreadyClosed = errors.New("batch already closed") +) + +const insertBlindWrite = `-- name: InsertBlindWrite :batchexec +INSERT INTO tx_blind_writes (block_num, tx_num, ns_id, key, value) +VALUES ($1, $2, $3, $4, $5) +` + +type InsertBlindWriteBatchResults struct { + br pgx.BatchResults + tot int + closed bool +} + +type InsertBlindWriteParams struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + NsID string `json:"ns_id"` + Key []byte `json:"key"` + Value []byte `json:"value"` +} + +func (q *Queries) InsertBlindWrite(ctx context.Context, arg []InsertBlindWriteParams) *InsertBlindWriteBatchResults { + batch := &pgx.Batch{} + for _, a := range arg { + vals := []interface{}{ + a.BlockNum, + a.TxNum, + a.NsID, + a.Key, + a.Value, + } + batch.Queue(insertBlindWrite, vals...) 
+ } + br := q.db.SendBatch(ctx, batch) + return &InsertBlindWriteBatchResults{br, len(arg), false} +} + +func (b *InsertBlindWriteBatchResults) Exec(f func(int, error)) { + defer b.br.Close() + for t := 0; t < b.tot; t++ { + if b.closed { + if f != nil { + f(t, ErrBatchAlreadyClosed) + } + continue + } + _, err := b.br.Exec() + if f != nil { + f(t, err) + } + } +} + +func (b *InsertBlindWriteBatchResults) Close() error { + b.closed = true + return b.br.Close() +} + +const insertReadOnly = `-- name: InsertReadOnly :batchexec +INSERT INTO tx_reads_only (block_num, tx_num, ns_id, key, version) +VALUES ($1, $2, $3, $4, $5) +` + +type InsertReadOnlyBatchResults struct { + br pgx.BatchResults + tot int + closed bool +} + +type InsertReadOnlyParams struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + NsID string `json:"ns_id"` + Key []byte `json:"key"` + Version pgtype.Int8 `json:"version"` +} + +func (q *Queries) InsertReadOnly(ctx context.Context, arg []InsertReadOnlyParams) *InsertReadOnlyBatchResults { + batch := &pgx.Batch{} + for _, a := range arg { + vals := []interface{}{ + a.BlockNum, + a.TxNum, + a.NsID, + a.Key, + a.Version, + } + batch.Queue(insertReadOnly, vals...) 
+ } + br := q.db.SendBatch(ctx, batch) + return &InsertReadOnlyBatchResults{br, len(arg), false} +} + +func (b *InsertReadOnlyBatchResults) Exec(f func(int, error)) { + defer b.br.Close() + for t := 0; t < b.tot; t++ { + if b.closed { + if f != nil { + f(t, ErrBatchAlreadyClosed) + } + continue + } + _, err := b.br.Exec() + if f != nil { + f(t, err) + } + } +} + +func (b *InsertReadOnlyBatchResults) Close() error { + b.closed = true + return b.br.Close() +} + +const insertReadWrite = `-- name: InsertReadWrite :batchexec +INSERT INTO tx_read_writes (block_num, tx_num, ns_id, key, read_version, value) +VALUES ($1, $2, $3, $4, $5, $6) +` + +type InsertReadWriteBatchResults struct { + br pgx.BatchResults + tot int + closed bool +} + +type InsertReadWriteParams struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + NsID string `json:"ns_id"` + Key []byte `json:"key"` + ReadVersion pgtype.Int8 `json:"read_version"` + Value []byte `json:"value"` +} + +func (q *Queries) InsertReadWrite(ctx context.Context, arg []InsertReadWriteParams) *InsertReadWriteBatchResults { + batch := &pgx.Batch{} + for _, a := range arg { + vals := []interface{}{ + a.BlockNum, + a.TxNum, + a.NsID, + a.Key, + a.ReadVersion, + a.Value, + } + batch.Queue(insertReadWrite, vals...) 
+ } + br := q.db.SendBatch(ctx, batch) + return &InsertReadWriteBatchResults{br, len(arg), false} +} + +func (b *InsertReadWriteBatchResults) Exec(f func(int, error)) { + defer b.br.Close() + for t := 0; t < b.tot; t++ { + if b.closed { + if f != nil { + f(t, ErrBatchAlreadyClosed) + } + continue + } + _, err := b.br.Exec() + if f != nil { + f(t, err) + } + } +} + +func (b *InsertReadWriteBatchResults) Close() error { + b.closed = true + return b.br.Close() +} + +const insertTransaction = `-- name: InsertTransaction :batchexec +INSERT INTO transactions (block_num, tx_num, tx_id, validation_code) +VALUES ($1, $2, $3, $4) +ON CONFLICT (block_num, tx_num) DO UPDATE SET tx_id = EXCLUDED.tx_id +` + +type InsertTransactionBatchResults struct { + br pgx.BatchResults + tot int + closed bool +} + +type InsertTransactionParams struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + TxID []byte `json:"tx_id"` + ValidationCode int64 `json:"validation_code"` +} + +func (q *Queries) InsertTransaction(ctx context.Context, arg []InsertTransactionParams) *InsertTransactionBatchResults { + batch := &pgx.Batch{} + for _, a := range arg { + vals := []interface{}{ + a.BlockNum, + a.TxNum, + a.TxID, + a.ValidationCode, + } + batch.Queue(insertTransaction, vals...) 
+ } + br := q.db.SendBatch(ctx, batch) + return &InsertTransactionBatchResults{br, len(arg), false} +} + +func (b *InsertTransactionBatchResults) Exec(f func(int, error)) { + defer b.br.Close() + for t := 0; t < b.tot; t++ { + if b.closed { + if f != nil { + f(t, ErrBatchAlreadyClosed) + } + continue + } + _, err := b.br.Exec() + if f != nil { + f(t, err) + } + } +} + +func (b *InsertTransactionBatchResults) Close() error { + b.closed = true + return b.br.Close() +} + +const insertTxEndorsement = `-- name: InsertTxEndorsement :batchexec +INSERT INTO tx_endorsements (block_num, tx_num, ns_id, endorsement, msp_id, identity) +VALUES ($1, $2, $3, $4, $5, $6) +` + +type InsertTxEndorsementBatchResults struct { + br pgx.BatchResults + tot int + closed bool +} + +type InsertTxEndorsementParams struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + NsID string `json:"ns_id"` + Endorsement []byte `json:"endorsement"` + MspID pgtype.Text `json:"msp_id"` + Identity []byte `json:"identity"` +} + +func (q *Queries) InsertTxEndorsement(ctx context.Context, arg []InsertTxEndorsementParams) *InsertTxEndorsementBatchResults { + batch := &pgx.Batch{} + for _, a := range arg { + vals := []interface{}{ + a.BlockNum, + a.TxNum, + a.NsID, + a.Endorsement, + a.MspID, + a.Identity, + } + batch.Queue(insertTxEndorsement, vals...) 
+ } + br := q.db.SendBatch(ctx, batch) + return &InsertTxEndorsementBatchResults{br, len(arg), false} +} + +func (b *InsertTxEndorsementBatchResults) Exec(f func(int, error)) { + defer b.br.Close() + for t := 0; t < b.tot; t++ { + if b.closed { + if f != nil { + f(t, ErrBatchAlreadyClosed) + } + continue + } + _, err := b.br.Exec() + if f != nil { + f(t, err) + } + } +} + +func (b *InsertTxEndorsementBatchResults) Close() error { + b.closed = true + return b.br.Close() +} + +const insertTxNamespace = `-- name: InsertTxNamespace :batchexec +INSERT INTO tx_namespaces (block_num, tx_num, ns_id, ns_version) +VALUES ($1, $2, $3, $4) +ON CONFLICT (block_num, tx_num, ns_id) DO UPDATE SET ns_version = EXCLUDED.ns_version +` + +type InsertTxNamespaceBatchResults struct { + br pgx.BatchResults + tot int + closed bool +} + +type InsertTxNamespaceParams struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + NsID string `json:"ns_id"` + NsVersion int64 `json:"ns_version"` +} + +func (q *Queries) InsertTxNamespace(ctx context.Context, arg []InsertTxNamespaceParams) *InsertTxNamespaceBatchResults { + batch := &pgx.Batch{} + for _, a := range arg { + vals := []interface{}{ + a.BlockNum, + a.TxNum, + a.NsID, + a.NsVersion, + } + batch.Queue(insertTxNamespace, vals...) 
+ } + br := q.db.SendBatch(ctx, batch) + return &InsertTxNamespaceBatchResults{br, len(arg), false} +} + +func (b *InsertTxNamespaceBatchResults) Exec(f func(int, error)) { + defer b.br.Close() + for t := 0; t < b.tot; t++ { + if b.closed { + if f != nil { + f(t, ErrBatchAlreadyClosed) + } + continue + } + _, err := b.br.Exec() + if f != nil { + f(t, err) + } + } +} + +func (b *InsertTxNamespaceBatchResults) Close() error { + b.closed = true + return b.br.Close() +} + +const upsertNamespacePolicy = `-- name: UpsertNamespacePolicy :batchexec +INSERT INTO namespace_policies (namespace, version, policy) +VALUES ($1, $2, $3) +ON CONFLICT (namespace, version) DO UPDATE SET policy = EXCLUDED.policy +` + +type UpsertNamespacePolicyBatchResults struct { + br pgx.BatchResults + tot int + closed bool +} + +type UpsertNamespacePolicyParams struct { + Namespace string `json:"namespace"` + Version int64 `json:"version"` + Policy []byte `json:"policy"` +} + +func (q *Queries) UpsertNamespacePolicy(ctx context.Context, arg []UpsertNamespacePolicyParams) *UpsertNamespacePolicyBatchResults { + batch := &pgx.Batch{} + for _, a := range arg { + vals := []interface{}{ + a.Namespace, + a.Version, + a.Policy, + } + batch.Queue(upsertNamespacePolicy, vals...) 
+ } + br := q.db.SendBatch(ctx, batch) + return &UpsertNamespacePolicyBatchResults{br, len(arg), false} +} + +func (b *UpsertNamespacePolicyBatchResults) Exec(f func(int, error)) { + defer b.br.Close() + for t := 0; t < b.tot; t++ { + if b.closed { + if f != nil { + f(t, ErrBatchAlreadyClosed) + } + continue + } + _, err := b.br.Exec() + if f != nil { + f(t, err) + } + } +} + +func (b *UpsertNamespacePolicyBatchResults) Close() error { + b.closed = true + return b.br.Close() +} diff --git a/pkg/db/sqlc/db.go b/pkg/db/sqlc/db.go index 7563f38..b54a21c 100644 --- a/pkg/db/sqlc/db.go +++ b/pkg/db/sqlc/db.go @@ -15,6 +15,7 @@ type DBTX interface { Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) Query(context.Context, string, ...interface{}) (pgx.Rows, error) QueryRow(context.Context, string, ...interface{}) pgx.Row + SendBatch(context.Context, *pgx.Batch) pgx.BatchResults } func New(db DBTX) *Queries { diff --git a/pkg/db/sqlc/models.go b/pkg/db/sqlc/models.go index 104c8d6..f4c5d7c 100644 --- a/pkg/db/sqlc/models.go +++ b/pkg/db/sqlc/models.go @@ -16,48 +16,55 @@ type Block struct { } type NamespacePolicy struct { - ID int64 `json:"id"` Namespace string `json:"namespace"` Version int64 `json:"version"` Policy []byte `json:"policy"` } type Transaction struct { - ID int64 `json:"id"` BlockNum int64 `json:"block_num"` TxNum int64 `json:"tx_num"` TxID []byte `json:"tx_id"` ValidationCode int64 `json:"validation_code"` } +type TxBlindWrite struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + NsID string `json:"ns_id"` + Key []byte `json:"key"` + Value []byte `json:"value"` +} + type TxEndorsement struct { - ID int64 `json:"id"` - TxNamespaceID int64 `json:"tx_namespace_id"` - Endorsement []byte `json:"endorsement"` - MspID pgtype.Text `json:"msp_id"` - Identity []byte `json:"identity"` + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + NsID string `json:"ns_id"` + Endorsement []byte `json:"endorsement"` + 
MspID pgtype.Text `json:"msp_id"` + Identity []byte `json:"identity"` } type TxNamespace struct { - ID int64 `json:"id"` - TransactionID int64 `json:"transaction_id"` - NsID string `json:"ns_id"` - NsVersion int64 `json:"ns_version"` -} - -type TxRead struct { - ID int64 `json:"id"` - TxNamespaceID int64 `json:"tx_namespace_id"` - Key []byte `json:"key"` - Version pgtype.Int8 `json:"version"` - IsReadWrite bool `json:"is_read_write"` -} - -type TxWrite struct { - ID int64 `json:"id"` - TxNamespaceID int64 `json:"tx_namespace_id"` - Key []byte `json:"key"` - Value []byte `json:"value"` - IsBlindWrite bool `json:"is_blind_write"` - ReadVersion pgtype.Int8 `json:"read_version"` + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + NsID string `json:"ns_id"` + NsVersion int64 `json:"ns_version"` +} + +type TxReadWrite struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + NsID string `json:"ns_id"` + Key []byte `json:"key"` + ReadVersion pgtype.Int8 `json:"read_version"` + Value []byte `json:"value"` +} + +type TxReadsOnly struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + NsID string `json:"ns_id"` + Key []byte `json:"key"` + Version pgtype.Int8 `json:"version"` } diff --git a/pkg/db/sqlc/namespace_policies.sql.go b/pkg/db/sqlc/namespace_policies.sql.go index 8ead733..f9de29c 100644 --- a/pkg/db/sqlc/namespace_policies.sql.go +++ b/pkg/db/sqlc/namespace_policies.sql.go @@ -10,7 +10,7 @@ import ( ) const getNamespacePolicies = `-- name: GetNamespacePolicies :many -SELECT id, namespace, version, policy +SELECT namespace, version, policy FROM namespace_policies WHERE namespace = $1 ORDER BY version DESC @@ -25,12 +25,7 @@ func (q *Queries) GetNamespacePolicies(ctx context.Context, namespace string) ([ items := []NamespacePolicy{} for rows.Next() { var i NamespacePolicy - if err := rows.Scan( - &i.ID, - &i.Namespace, - &i.Version, - &i.Policy, - ); err != nil { + if err := rows.Scan(&i.Namespace, &i.Version, 
&i.Policy); err != nil { return nil, err } items = append(items, i) @@ -40,20 +35,3 @@ func (q *Queries) GetNamespacePolicies(ctx context.Context, namespace string) ([ } return items, nil } - -const upsertNamespacePolicy = `-- name: UpsertNamespacePolicy :exec -INSERT INTO namespace_policies (namespace, version, policy) -VALUES ($1, $2, $3) -ON CONFLICT (namespace, version) DO UPDATE SET policy = EXCLUDED.policy -` - -type UpsertNamespacePolicyParams struct { - Namespace string `json:"namespace"` - Version int64 `json:"version"` - Policy []byte `json:"policy"` -} - -func (q *Queries) UpsertNamespacePolicy(ctx context.Context, arg UpsertNamespacePolicyParams) error { - _, err := q.db.Exec(ctx, upsertNamespacePolicy, arg.Namespace, arg.Version, arg.Policy) - return err -} diff --git a/pkg/db/sqlc/querier.go b/pkg/db/sqlc/querier.go index 12e1150..1379728 100644 --- a/pkg/db/sqlc/querier.go +++ b/pkg/db/sqlc/querier.go @@ -9,22 +9,23 @@ import ( ) type Querier interface { + GetBlindWritesByTx(ctx context.Context, arg GetBlindWritesByTxParams) ([]GetBlindWritesByTxRow, error) GetBlock(ctx context.Context, blockNum int64) (Block, error) GetBlockHeight(ctx context.Context) (interface{}, error) GetEndorsementsByTx(ctx context.Context, arg GetEndorsementsByTxParams) ([]GetEndorsementsByTxRow, error) GetNamespacePolicies(ctx context.Context, namespace string) ([]NamespacePolicy, error) - GetReadsByTx(ctx context.Context, arg GetReadsByTxParams) ([]GetReadsByTxRow, error) - GetTransactionID(ctx context.Context, arg GetTransactionIDParams) (int64, error) + GetReadWritesByTx(ctx context.Context, arg GetReadWritesByTxParams) ([]GetReadWritesByTxRow, error) + GetReadsOnlyByTx(ctx context.Context, arg GetReadsOnlyByTxParams) ([]GetReadsOnlyByTxRow, error) GetValidationCodeByBlock(ctx context.Context, arg GetValidationCodeByBlockParams) ([]Transaction, error) GetValidationCodeByTxID(ctx context.Context, txID []byte) (Transaction, error) - GetWritesByTx(ctx context.Context, arg 
GetWritesByTxParams) ([]GetWritesByTxRow, error) + InsertBlindWrite(ctx context.Context, arg []InsertBlindWriteParams) *InsertBlindWriteBatchResults InsertBlock(ctx context.Context, arg InsertBlockParams) error - InsertTransaction(ctx context.Context, arg InsertTransactionParams) (int64, error) - InsertTxEndorsement(ctx context.Context, arg InsertTxEndorsementParams) error - InsertTxNamespace(ctx context.Context, arg InsertTxNamespaceParams) (int64, error) - InsertTxRead(ctx context.Context, arg InsertTxReadParams) error - InsertTxWrite(ctx context.Context, arg InsertTxWriteParams) error - UpsertNamespacePolicy(ctx context.Context, arg UpsertNamespacePolicyParams) error + InsertReadOnly(ctx context.Context, arg []InsertReadOnlyParams) *InsertReadOnlyBatchResults + InsertReadWrite(ctx context.Context, arg []InsertReadWriteParams) *InsertReadWriteBatchResults + InsertTransaction(ctx context.Context, arg []InsertTransactionParams) *InsertTransactionBatchResults + InsertTxEndorsement(ctx context.Context, arg []InsertTxEndorsementParams) *InsertTxEndorsementBatchResults + InsertTxNamespace(ctx context.Context, arg []InsertTxNamespaceParams) *InsertTxNamespaceBatchResults + UpsertNamespacePolicy(ctx context.Context, arg []UpsertNamespacePolicyParams) *UpsertNamespacePolicyBatchResults } var _ Querier = (*Queries)(nil) diff --git a/pkg/db/sqlc/transactions.sql.go b/pkg/db/sqlc/transactions.sql.go index 9651c37..fa85c3a 100644 --- a/pkg/db/sqlc/transactions.sql.go +++ b/pkg/db/sqlc/transactions.sql.go @@ -7,30 +7,10 @@ package dbsqlc import ( "context" - - "github.com/jackc/pgx/v5/pgtype" ) -const getTransactionID = `-- name: GetTransactionID :one -SELECT id -FROM transactions -WHERE block_num = $1 AND tx_num = $2 -` - -type GetTransactionIDParams struct { - BlockNum int64 `json:"block_num"` - TxNum int64 `json:"tx_num"` -} - -func (q *Queries) GetTransactionID(ctx context.Context, arg GetTransactionIDParams) (int64, error) { - row := q.db.QueryRow(ctx, getTransactionID, 
arg.BlockNum, arg.TxNum) - var id int64 - err := row.Scan(&id) - return id, err -} - const getValidationCodeByBlock = `-- name: GetValidationCodeByBlock :many -SELECT id, block_num, tx_num, tx_id, validation_code +SELECT block_num, tx_num, tx_id, validation_code FROM transactions WHERE block_num = $1 ORDER BY tx_num @@ -53,7 +33,6 @@ func (q *Queries) GetValidationCodeByBlock(ctx context.Context, arg GetValidatio for rows.Next() { var i Transaction if err := rows.Scan( - &i.ID, &i.BlockNum, &i.TxNum, &i.TxID, @@ -70,7 +49,7 @@ func (q *Queries) GetValidationCodeByBlock(ctx context.Context, arg GetValidatio } const getValidationCodeByTxID = `-- name: GetValidationCodeByTxID :one -SELECT id, block_num, tx_num, tx_id, validation_code +SELECT block_num, tx_num, tx_id, validation_code FROM transactions WHERE tx_id = $1 ` @@ -79,7 +58,6 @@ func (q *Queries) GetValidationCodeByTxID(ctx context.Context, txID []byte) (Tra row := q.db.QueryRow(ctx, getValidationCodeByTxID, txID) var i Transaction err := row.Scan( - &i.ID, &i.BlockNum, &i.TxNum, &i.TxID, @@ -87,95 +65,3 @@ func (q *Queries) GetValidationCodeByTxID(ctx context.Context, txID []byte) (Tra ) return i, err } - -const insertTransaction = `-- name: InsertTransaction :one -INSERT INTO transactions (block_num, tx_num, tx_id, validation_code) -VALUES ($1, $2, $3, $4) -ON CONFLICT (block_num, tx_num) DO UPDATE SET tx_id = EXCLUDED.tx_id -RETURNING id -` - -type InsertTransactionParams struct { - BlockNum int64 `json:"block_num"` - TxNum int64 `json:"tx_num"` - TxID []byte `json:"tx_id"` - ValidationCode int64 `json:"validation_code"` -} - -func (q *Queries) InsertTransaction(ctx context.Context, arg InsertTransactionParams) (int64, error) { - row := q.db.QueryRow(ctx, insertTransaction, - arg.BlockNum, - arg.TxNum, - arg.TxID, - arg.ValidationCode, - ) - var id int64 - err := row.Scan(&id) - return id, err -} - -const insertTxNamespace = `-- name: InsertTxNamespace :one -INSERT INTO tx_namespaces (transaction_id, ns_id, 
ns_version) -VALUES ($1, $2, $3) -ON CONFLICT (transaction_id, ns_id) DO UPDATE SET ns_version = EXCLUDED.ns_version -RETURNING id -` - -type InsertTxNamespaceParams struct { - TransactionID int64 `json:"transaction_id"` - NsID string `json:"ns_id"` - NsVersion int64 `json:"ns_version"` -} - -func (q *Queries) InsertTxNamespace(ctx context.Context, arg InsertTxNamespaceParams) (int64, error) { - row := q.db.QueryRow(ctx, insertTxNamespace, arg.TransactionID, arg.NsID, arg.NsVersion) - var id int64 - err := row.Scan(&id) - return id, err -} - -const insertTxRead = `-- name: InsertTxRead :exec -INSERT INTO tx_reads (tx_namespace_id, key, version, is_read_write) -VALUES ($1, $2, $3, $4) -` - -type InsertTxReadParams struct { - TxNamespaceID int64 `json:"tx_namespace_id"` - Key []byte `json:"key"` - Version pgtype.Int8 `json:"version"` - IsReadWrite bool `json:"is_read_write"` -} - -func (q *Queries) InsertTxRead(ctx context.Context, arg InsertTxReadParams) error { - _, err := q.db.Exec(ctx, insertTxRead, - arg.TxNamespaceID, - arg.Key, - arg.Version, - arg.IsReadWrite, - ) - return err -} - -const insertTxWrite = `-- name: InsertTxWrite :exec -INSERT INTO tx_writes (tx_namespace_id, key, value, is_blind_write, read_version) -VALUES ($1, $2, $3, $4, $5) -` - -type InsertTxWriteParams struct { - TxNamespaceID int64 `json:"tx_namespace_id"` - Key []byte `json:"key"` - Value []byte `json:"value"` - IsBlindWrite bool `json:"is_blind_write"` - ReadVersion pgtype.Int8 `json:"read_version"` -} - -func (q *Queries) InsertTxWrite(ctx context.Context, arg InsertTxWriteParams) error { - _, err := q.db.Exec(ctx, insertTxWrite, - arg.TxNamespaceID, - arg.Key, - arg.Value, - arg.IsBlindWrite, - arg.ReadVersion, - ) - return err -} diff --git a/pkg/db/sqlc/tx_blind_writes.sql.go b/pkg/db/sqlc/tx_blind_writes.sql.go new file mode 100644 index 0000000..5e5a097 --- /dev/null +++ b/pkg/db/sqlc/tx_blind_writes.sql.go @@ -0,0 +1,56 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.30.0 +// source: tx_blind_writes.sql + +package dbsqlc + +import ( + "context" +) + +const getBlindWritesByTx = `-- name: GetBlindWritesByTx :many +SELECT ns_id, key, value +FROM tx_blind_writes +WHERE block_num = $1 AND tx_num = $2 +ORDER BY ns_id, key +LIMIT $3 OFFSET $4 +` + +type GetBlindWritesByTxParams struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` + Limit int32 `json:"limit"` + Offset int32 `json:"offset"` +} + +type GetBlindWritesByTxRow struct { + NsID string `json:"ns_id"` + Key []byte `json:"key"` + Value []byte `json:"value"` +} + +func (q *Queries) GetBlindWritesByTx(ctx context.Context, arg GetBlindWritesByTxParams) ([]GetBlindWritesByTxRow, error) { + rows, err := q.db.Query(ctx, getBlindWritesByTx, + arg.BlockNum, + arg.TxNum, + arg.Limit, + arg.Offset, + ) + if err != nil { + return nil, err + } + defer rows.Close() + items := []GetBlindWritesByTxRow{} + for rows.Next() { + var i GetBlindWritesByTxRow + if err := rows.Scan(&i.NsID, &i.Key, &i.Value); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/pkg/db/sqlc/tx_endorsements.sql.go b/pkg/db/sqlc/tx_endorsements.sql.go index 8438fba..1dbf5b1 100644 --- a/pkg/db/sqlc/tx_endorsements.sql.go +++ b/pkg/db/sqlc/tx_endorsements.sql.go @@ -12,12 +12,10 @@ import ( ) const getEndorsementsByTx = `-- name: GetEndorsementsByTx :many -SELECT te.id, te.endorsement, te.msp_id, te.identity, tn.ns_id -FROM tx_endorsements te -JOIN tx_namespaces tn ON te.tx_namespace_id = tn.id -JOIN transactions t ON tn.transaction_id = t.id -WHERE t.block_num = $1 AND t.tx_num = $2 -ORDER BY te.id +SELECT ns_id, endorsement, msp_id, identity +FROM tx_endorsements +WHERE block_num = $1 AND tx_num = $2 +ORDER BY ns_id, endorsement ` type GetEndorsementsByTxParams struct { @@ -26,11 +24,10 @@ type GetEndorsementsByTxParams struct { } type GetEndorsementsByTxRow struct { 
- ID int64 `json:"id"` + NsID string `json:"ns_id"` Endorsement []byte `json:"endorsement"` MspID pgtype.Text `json:"msp_id"` Identity []byte `json:"identity"` - NsID string `json:"ns_id"` } func (q *Queries) GetEndorsementsByTx(ctx context.Context, arg GetEndorsementsByTxParams) ([]GetEndorsementsByTxRow, error) { @@ -43,11 +40,10 @@ func (q *Queries) GetEndorsementsByTx(ctx context.Context, arg GetEndorsementsBy for rows.Next() { var i GetEndorsementsByTxRow if err := rows.Scan( - &i.ID, + &i.NsID, &i.Endorsement, &i.MspID, &i.Identity, - &i.NsID, ); err != nil { return nil, err } @@ -58,25 +54,3 @@ func (q *Queries) GetEndorsementsByTx(ctx context.Context, arg GetEndorsementsBy } return items, nil } - -const insertTxEndorsement = `-- name: InsertTxEndorsement :exec -INSERT INTO tx_endorsements (tx_namespace_id, endorsement, msp_id, identity) -VALUES ($1, $2, $3, $4) -` - -type InsertTxEndorsementParams struct { - TxNamespaceID int64 `json:"tx_namespace_id"` - Endorsement []byte `json:"endorsement"` - MspID pgtype.Text `json:"msp_id"` - Identity []byte `json:"identity"` -} - -func (q *Queries) InsertTxEndorsement(ctx context.Context, arg InsertTxEndorsementParams) error { - _, err := q.db.Exec(ctx, insertTxEndorsement, - arg.TxNamespaceID, - arg.Endorsement, - arg.MspID, - arg.Identity, - ) - return err -} diff --git a/pkg/db/sqlc/tx_read_writes.sql.go b/pkg/db/sqlc/tx_read_writes.sql.go new file mode 100644 index 0000000..cb8732b --- /dev/null +++ b/pkg/db/sqlc/tx_read_writes.sql.go @@ -0,0 +1,56 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.30.0 +// source: tx_read_writes.sql + +package dbsqlc + +import ( + "context" + + "github.com/jackc/pgx/v5/pgtype" +) + +const getReadWritesByTx = `-- name: GetReadWritesByTx :many +SELECT ns_id, key, read_version, value +FROM tx_read_writes +WHERE block_num = $1 AND tx_num = $2 +ORDER BY ns_id, key +` + +type GetReadWritesByTxParams struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` +} + +type GetReadWritesByTxRow struct { + NsID string `json:"ns_id"` + Key []byte `json:"key"` + ReadVersion pgtype.Int8 `json:"read_version"` + Value []byte `json:"value"` +} + +func (q *Queries) GetReadWritesByTx(ctx context.Context, arg GetReadWritesByTxParams) ([]GetReadWritesByTxRow, error) { + rows, err := q.db.Query(ctx, getReadWritesByTx, arg.BlockNum, arg.TxNum) + if err != nil { + return nil, err + } + defer rows.Close() + items := []GetReadWritesByTxRow{} + for rows.Next() { + var i GetReadWritesByTxRow + if err := rows.Scan( + &i.NsID, + &i.Key, + &i.ReadVersion, + &i.Value, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/pkg/db/sqlc/tx_reads.sql.go b/pkg/db/sqlc/tx_reads.sql.go deleted file mode 100644 index 6584ba3..0000000 --- a/pkg/db/sqlc/tx_reads.sql.go +++ /dev/null @@ -1,60 +0,0 @@ -// Code generated by sqlc. DO NOT EDIT. 
-// versions: -// sqlc v1.30.0 -// source: tx_reads.sql - -package dbsqlc - -import ( - "context" - - "github.com/jackc/pgx/v5/pgtype" -) - -const getReadsByTx = `-- name: GetReadsByTx :many -SELECT tr.id, tr.key, tr.version, tr.is_read_write, tn.ns_id -FROM tx_reads tr -JOIN tx_namespaces tn ON tr.tx_namespace_id = tn.id -JOIN transactions t ON tn.transaction_id = t.id -WHERE t.block_num = $1 AND t.tx_num = $2 -ORDER BY tr.id -` - -type GetReadsByTxParams struct { - BlockNum int64 `json:"block_num"` - TxNum int64 `json:"tx_num"` -} - -type GetReadsByTxRow struct { - ID int64 `json:"id"` - Key []byte `json:"key"` - Version pgtype.Int8 `json:"version"` - IsReadWrite bool `json:"is_read_write"` - NsID string `json:"ns_id"` -} - -func (q *Queries) GetReadsByTx(ctx context.Context, arg GetReadsByTxParams) ([]GetReadsByTxRow, error) { - rows, err := q.db.Query(ctx, getReadsByTx, arg.BlockNum, arg.TxNum) - if err != nil { - return nil, err - } - defer rows.Close() - items := []GetReadsByTxRow{} - for rows.Next() { - var i GetReadsByTxRow - if err := rows.Scan( - &i.ID, - &i.Key, - &i.Version, - &i.IsReadWrite, - &i.NsID, - ); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} diff --git a/pkg/db/sqlc/tx_reads_only.sql.go b/pkg/db/sqlc/tx_reads_only.sql.go new file mode 100644 index 0000000..7a0efd2 --- /dev/null +++ b/pkg/db/sqlc/tx_reads_only.sql.go @@ -0,0 +1,50 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.30.0 +// source: tx_reads_only.sql + +package dbsqlc + +import ( + "context" + + "github.com/jackc/pgx/v5/pgtype" +) + +const getReadsOnlyByTx = `-- name: GetReadsOnlyByTx :many +SELECT ns_id, key, version +FROM tx_reads_only +WHERE block_num = $1 AND tx_num = $2 +ORDER BY ns_id, key +` + +type GetReadsOnlyByTxParams struct { + BlockNum int64 `json:"block_num"` + TxNum int64 `json:"tx_num"` +} + +type GetReadsOnlyByTxRow struct { + NsID string `json:"ns_id"` + Key []byte `json:"key"` + Version pgtype.Int8 `json:"version"` +} + +func (q *Queries) GetReadsOnlyByTx(ctx context.Context, arg GetReadsOnlyByTxParams) ([]GetReadsOnlyByTxRow, error) { + rows, err := q.db.Query(ctx, getReadsOnlyByTx, arg.BlockNum, arg.TxNum) + if err != nil { + return nil, err + } + defer rows.Close() + items := []GetReadsOnlyByTxRow{} + for rows.Next() { + var i GetReadsOnlyByTxRow + if err := rows.Scan(&i.NsID, &i.Key, &i.Version); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/pkg/db/sqlc/tx_writes.sql.go b/pkg/db/sqlc/tx_writes.sql.go deleted file mode 100644 index 6672ba1..0000000 --- a/pkg/db/sqlc/tx_writes.sql.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated by sqlc. DO NOT EDIT. 
-// versions: -// sqlc v1.30.0 -// source: tx_writes.sql - -package dbsqlc - -import ( - "context" - - "github.com/jackc/pgx/v5/pgtype" -) - -const getWritesByTx = `-- name: GetWritesByTx :many -SELECT - tw.id, - tw.key, - tw.value, - tw.is_blind_write, - tw.read_version, - tn.ns_id -FROM tx_writes tw -JOIN tx_namespaces tn ON tw.tx_namespace_id = tn.id -JOIN transactions t ON tn.transaction_id = t.id -WHERE t.block_num = $1 AND t.tx_num = $2 -ORDER BY tw.id -LIMIT $3 OFFSET $4 -` - -type GetWritesByTxParams struct { - BlockNum int64 `json:"block_num"` - TxNum int64 `json:"tx_num"` - Limit int32 `json:"limit"` - Offset int32 `json:"offset"` -} - -type GetWritesByTxRow struct { - ID int64 `json:"id"` - Key []byte `json:"key"` - Value []byte `json:"value"` - IsBlindWrite bool `json:"is_blind_write"` - ReadVersion pgtype.Int8 `json:"read_version"` - NsID string `json:"ns_id"` -} - -func (q *Queries) GetWritesByTx(ctx context.Context, arg GetWritesByTxParams) ([]GetWritesByTxRow, error) { - rows, err := q.db.Query(ctx, getWritesByTx, - arg.BlockNum, - arg.TxNum, - arg.Limit, - arg.Offset, - ) - if err != nil { - return nil, err - } - defer rows.Close() - items := []GetWritesByTxRow{} - for rows.Next() { - var i GetWritesByTxRow - if err := rows.Scan( - &i.ID, - &i.Key, - &i.Value, - &i.IsBlindWrite, - &i.ReadVersion, - &i.NsID, - ); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} diff --git a/pkg/db/test_exports.go b/pkg/db/test_exports.go new file mode 100644 index 0000000..0a7bca1 --- /dev/null +++ b/pkg/db/test_exports.go @@ -0,0 +1,96 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package db + +import ( + "context" + _ "embed" + "testing" + + "github.com/jackc/pgx/v5/pgxpool" + "github.com/stretchr/testify/require" + + "github.com/LF-Decentralized-Trust-labs/fabric-x-block-explorer/pkg/db/dbtest" + dbsqlc "github.com/LF-Decentralized-Trust-labs/fabric-x-block-explorer/pkg/db/sqlc" +) + +//go:embed schema.sql +var schemaSQL string + +// DatabaseTestEnv provides a test database environment. +type DatabaseTestEnv struct { + Pool *pgxpool.Pool + Queries *dbsqlc.Queries + tc *dbtest.TestContainer +} + +// NewDatabaseTestEnv creates a new test environment with a PostgreSQL testcontainer. +// The schema is automatically initialized, and cleanup is registered with t.Cleanup(). +func NewDatabaseTestEnv(t *testing.T) *DatabaseTestEnv { + t.Helper() + + tc := dbtest.PrepareTestEnv(t) + + ctx := context.Background() + _, err := tc.Pool.Exec(ctx, schemaSQL) + require.NoError(t, err, "failed to initialize database schema") + + queries := dbsqlc.New(tc.Pool) + + env := &DatabaseTestEnv{ + Pool: tc.Pool, + Queries: queries, + tc: tc, + } + + t.Cleanup(func() { + tc.Close(t) + }) + + return env +} + +// AssertBlockExists checks that a block exists. +func (env *DatabaseTestEnv) AssertBlockExists(t *testing.T, blockNum int64) { + t.Helper() + + ctx := context.Background() + block, err := env.Queries.GetBlock(ctx, blockNum) + require.NoError(t, err, "block %d should exist", blockNum) + require.Equal(t, blockNum, block.BlockNum) +} + +// AssertBlockNotExists checks that a block does not exist. +func (env *DatabaseTestEnv) AssertBlockNotExists(t *testing.T, blockNum int64) { + t.Helper() + + ctx := context.Background() + _, err := env.Queries.GetBlock(ctx, blockNum) + require.Error(t, err, "block %d should not exist", blockNum) +} + +// GetBlockCount returns the number of blocks. 
+func (env *DatabaseTestEnv) GetBlockCount(t *testing.T) int64 { + t.Helper() + + ctx := context.Background() + var count int64 + err := env.Pool.QueryRow(ctx, "SELECT COUNT(*) FROM blocks").Scan(&count) + require.NoError(t, err, "failed to count blocks") + return count +} + +// GetTransactionCount returns the total number of transactions in the database. +func (env *DatabaseTestEnv) GetTransactionCount(t *testing.T) int64 { + t.Helper() + + ctx := context.Background() + var count int64 + err := env.Pool.QueryRow(ctx, "SELECT COUNT(*) FROM transactions").Scan(&count) + require.NoError(t, err, "failed to count transactions") + return count +} diff --git a/pkg/types/types.go b/pkg/types/types.go new file mode 100644 index 0000000..6cf9fff --- /dev/null +++ b/pkg/types/types.go @@ -0,0 +1,84 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" +) + +// ParsedBlockData contains all parsed data from a block, organized by transaction. +type ParsedBlockData struct { + Transactions []TxRecord + Policies []NamespacePolicyRecord +} + +// ProcessedBlock wraps parsed block data with metadata for persistence. +type ProcessedBlock struct { + Txns int + Data any + BlockInfo *BlockInfo +} + +// BlockInfo contains block header metadata. +type BlockInfo struct { + Number uint64 + PreviousHash []byte + DataHash []byte +} + +// TxRecord groups all data for a single transaction within a block. +type TxRecord struct { + BlockNum uint64 + TxNum uint64 + TxID string + ValidationCode int32 + Namespaces []TxNamespaceRecord +} + +// TxNamespaceRecord represents a single (transaction, namespace) pair entry, +// containing all reads, writes, and endorsements for that namespace. 
+type TxNamespaceRecord struct { + NsID string + NsVersion uint64 + ReadsOnly []ReadOnlyRecord + ReadWrites []ReadWriteRecord + BlindWrites []BlindWriteRecord + Endorsements []EndorsementRecord +} + +// ReadOnlyRecord represents a read-only operation within a namespace. +type ReadOnlyRecord struct { + Key string + Version *uint64 +} + +// ReadWriteRecord represents a read-write operation within a namespace. +type ReadWriteRecord struct { + Key string + ReadVersion *uint64 + Value []byte +} + +// BlindWriteRecord represents a blind write operation within a namespace. +type BlindWriteRecord struct { + Key string + Value []byte +} + +// EndorsementRecord represents a signature endorsement for a namespace. +type EndorsementRecord struct { + Endorsement []byte + MspID *string + Identity []byte +} + +// NamespacePolicyRecord represents a policy update for a namespace. +type NamespacePolicyRecord struct { + Namespace string + Version uint64 + PolicyJSON json.RawMessage +} diff --git a/pkg/util/nullable.go b/pkg/util/nullable.go new file mode 100644 index 0000000..cb5e794 --- /dev/null +++ b/pkg/util/nullable.go @@ -0,0 +1,41 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package util + +import "github.com/jackc/pgx/v5/pgtype" + +// NullableInt64ToPtr converts a pgtype.Int8 to *int64. +func NullableInt64ToPtr(v pgtype.Int8) *int64 { + if !v.Valid { + return nil + } + return &v.Int64 +} + +// NullableStringToPtr converts a pgtype.Text to *string. +func NullableStringToPtr(v pgtype.Text) *string { + if !v.Valid { + return nil + } + return &v.String +} + +// PtrToNullableInt64 converts *uint64 to pgtype.Int8. +func PtrToNullableInt64(v *uint64) pgtype.Int8 { + if v == nil { + return pgtype.Int8{Valid: false} + } + return pgtype.Int8{Int64: int64(*v), Valid: true} //nolint:gosec // version numbers fit in int64 +} + +// PtrToNullableString converts *string to pgtype.Text. 
+func PtrToNullableString(v *string) pgtype.Text { + if v == nil { + return pgtype.Text{Valid: false} + } + return pgtype.Text{String: *v, Valid: true} +} diff --git a/pkg/util/nullable_test.go b/pkg/util/nullable_test.go new file mode 100644 index 0000000..9194dd6 --- /dev/null +++ b/pkg/util/nullable_test.go @@ -0,0 +1,179 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package util + +import ( + "testing" + + "github.com/jackc/pgx/v5/pgtype" + "github.com/stretchr/testify/assert" +) + +func TestNullableInt64ToPtr(t *testing.T) { + tests := []struct { + name string + input pgtype.Int8 + want *int64 + }{ + { + name: "valid int64", + input: pgtype.Int8{Int64: 42, Valid: true}, + want: ptr(int64(42)), + }, + { + name: "null int64", + input: pgtype.Int8{Valid: false}, + want: nil, + }, + { + name: "zero value", + input: pgtype.Int8{Int64: 0, Valid: true}, + want: ptr(int64(0)), + }, + { + name: "negative value", + input: pgtype.Int8{Int64: -100, Valid: true}, + want: ptr(int64(-100)), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := NullableInt64ToPtr(tt.input) + if tt.want == nil { + assert.Nil(t, result) + } else { + assert.NotNil(t, result) + assert.Equal(t, *tt.want, *result) + } + }) + } +} + +func TestNullableStringToPtr(t *testing.T) { + tests := []struct { + name string + input pgtype.Text + want *string + }{ + { + name: "valid string", + input: pgtype.Text{String: "hello", Valid: true}, + want: ptr("hello"), + }, + { + name: "null string", + input: pgtype.Text{Valid: false}, + want: nil, + }, + { + name: "empty string", + input: pgtype.Text{String: "", Valid: true}, + want: ptr(""), + }, + { + name: "string with spaces", + input: pgtype.Text{String: " spaces ", Valid: true}, + want: ptr(" spaces "), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := NullableStringToPtr(tt.input) + if tt.want == nil { + assert.Nil(t, result) + } else 
{ + assert.NotNil(t, result) + assert.Equal(t, *tt.want, *result) + } + }) + } +} + +func TestPtrToNullableInt64(t *testing.T) { + tests := []struct { + name string + input *uint64 + want pgtype.Int8 + }{ + { + name: "valid uint64", + input: ptr(uint64(42)), + want: pgtype.Int8{Int64: 42, Valid: true}, + }, + { + name: "nil pointer", + input: nil, + want: pgtype.Int8{Valid: false}, + }, + { + name: "zero value", + input: ptr(uint64(0)), + want: pgtype.Int8{Int64: 0, Valid: true}, + }, + { + name: "large value", + input: ptr(uint64(1234567890)), + want: pgtype.Int8{Int64: 1234567890, Valid: true}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := PtrToNullableInt64(tt.input) + assert.Equal(t, tt.want.Valid, result.Valid) + if tt.want.Valid { + assert.Equal(t, tt.want.Int64, result.Int64) + } + }) + } +} + +func TestPtrToNullableString(t *testing.T) { + tests := []struct { + name string + input *string + want pgtype.Text + }{ + { + name: "valid string", + input: ptr("hello"), + want: pgtype.Text{String: "hello", Valid: true}, + }, + { + name: "nil pointer", + input: nil, + want: pgtype.Text{Valid: false}, + }, + { + name: "empty string", + input: ptr(""), + want: pgtype.Text{String: "", Valid: true}, + }, + { + name: "string with special characters", + input: ptr("test\nline\ttab"), + want: pgtype.Text{String: "test\nline\ttab", Valid: true}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := PtrToNullableString(tt.input) + assert.Equal(t, tt.want.Valid, result.Valid) + if tt.want.Valid { + assert.Equal(t, tt.want.String, result.String) + } + }) + } +} + +// Helper function to create pointers. +func ptr[T any](v T) *T { + return &v +}