diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6cb20ca8a..8c3c95c25 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -179,6 +179,8 @@ jobs: target: "./itests/harmonydb_test.go" - name: test-itest-alertnow target: "./itests/alertnow_test.go" + - name: test-itest-pdp-prove + target: "./itests/pdp_prove_test.go" steps: - uses: actions/checkout@v4 @@ -311,6 +313,10 @@ jobs: run: go install github.com/hannahhoward/cbor-gen-for shell: bash + - name: Install swag cli + run: go install github.com/swaggo/swag/cmd/swag@v1.16.4 + shell: bash + # - name: Install gotext # run: go install golang.org/x/text/cmd/gotext # shell: bash diff --git a/Dockerfile b/Dockerfile index 0b4a00bb5..2a42de94e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,6 +14,11 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH \ RUST_VERSION=1.63.0 +COPY ./ /opt/curio +WORKDIR /opt/curio +RUN git submodule update --init +RUN go mod download + RUN set -eux; \ dpkgArch="$(dpkg --print-architecture)"; \ case "${dpkgArch##*-}" in \ @@ -32,9 +37,6 @@ RUN set -eux; \ cargo --version; \ rustc --version; -COPY ./ /opt/curio -WORKDIR /opt/curio - ### make configurable filecoin-ffi build ARG FFI_BUILD_FROM_SOURCE=0 ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE} @@ -56,13 +58,26 @@ RUN go install github.com/ipld/go-car/cmd/car@latest \ RUN go install github.com/LexLuthr/piece-server@latest \ && cp $GOPATH/bin/piece-server /usr/local/bin/ -RUN go install github.com/ipni/storetheindex@v0.8.38 \ +RUN go install github.com/ipni/storetheindex@latest \ && cp $GOPATH/bin/storetheindex /usr/local/bin/ +RUN go install github.com/ethereum/go-ethereum/cmd/geth@latest \ + && cp $GOPATH/bin/geth /usr/local/bin/ + ##################################### FROM ubuntu:22.04 AS curio-all-in-one -RUN apt-get update && apt-get install -y dnsutils vim curl aria2 jq +RUN apt-get update && apt-get install -y dnsutils vim curl aria2 jq git wget nodejs npm + +# Install Foundry +RUN curl -L https://foundry.paradigm.xyz | bash \ + && bash -c ". ~/.foundry/bin/foundryup" + +# Make sure foundry binaries are available in PATH +ENV PATH="/root/.foundry/bin:${PATH}" + +# Verify installation +RUN forge --version && cast --version && anvil --version # Copy libraries and binaries from curio-builder COPY --from=curio-builder /etc/ssl/certs /etc/ssl/certs @@ -98,6 +113,7 @@ COPY --from=curio-builder /opt/curio/sptool /usr/local/bin/ COPY --from=piece-server-builder /usr/local/bin/piece-server /usr/local/bin/ COPY --from=piece-server-builder /usr/local/bin/car /usr/local/bin/ COPY --from=piece-server-builder /usr/local/bin/storetheindex /usr/local/bin/ +COPY --from=piece-server-builder /usr/local/bin/geth /usr/local/bin/ # Set up directories and permissions RUN mkdir /var/tmp/filecoin-proof-parameters \ diff --git a/Makefile b/Makefile index 81c041a32..8de510501 100644 --- a/Makefile +++ b/Makefile @@ -266,7 +266,11 @@ go-generate: gen: gensimple .PHONY: gen -gensimple: api-gen go-generate cfgdoc-gen docsgen docsgen-cli +marketgen: + swag init -dir market/mk20/http -g http.go -o market/mk20/http --parseDependencyLevel 3 --parseDependency +.PHONY: marketgen + +gensimple: api-gen go-generate cfgdoc-gen docsgen marketgen docsgen-cli $(GOCC) run ./scripts/fiximports go mod tidy .PHONY: gen diff --git a/alertmanager/alerts.go b/alertmanager/alerts.go index fafe65408..195d1e7ab 100644 --- a/alertmanager/alerts.go +++ b/alertmanager/alerts.go @@ -3,7 +3,6 @@ package alertmanager import ( "bytes" "context" - "database/sql" "fmt" "math" "strings" @@ -13,6 +12,7 @@ import ( "github.com/dustin/go-humanize" cbor "github.com/ipfs/go-ipld-cbor" "github.com/samber/lo" + "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -346,7 +346,7 @@ func (al *alerts) getAddresses() ([]address.Address, []address.Address, error) { cfg := config.DefaultCurioConfig() err := al.db.QueryRow(al.ctx, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) if err != nil { - if strings.Contains(err.Error(), sql.ErrNoRows.Error()) { + if strings.Contains(err.Error(), pgx.ErrNoRows.Error()) { return nil, nil, xerrors.Errorf("missing layer '%s' ", layer) } return nil, nil, xerrors.Errorf("could not read layer '%s': %w", layer, err) @@ -731,7 +731,7 @@ func missingSectorCheck(al *alerts) { SectorID int64 `db:"sector_num"` } - err := al.db.Select(al.ctx, §ors, `SELECT miner_id, sector_num FROM sector_location WHERE sector_filetype = 2 GROUP BY miner_id, sector_num ORDER BY miner_id, sector_num`) + err := al.db.Select(al.ctx, §ors, `SELECT miner_id, sector_num FROM sector_location WHERE sector_filetype = ANY(ARRAY[2,8]) GROUP BY miner_id, sector_num ORDER BY miner_id, sector_num`) if err != nil { al.alertMap[Name].err = xerrors.Errorf("getting sealed sectors from database: %w", err) return diff --git a/alertmanager/plugin/slack_webhook.go b/alertmanager/plugin/slack_webhook.go index 118e92d4c..b524a036f 100644 --- a/alertmanager/plugin/slack_webhook.go +++ b/alertmanager/plugin/slack_webhook.go @@ -65,29 +65,60 @@ func (s *SlackWebhook) SendAlert(data *AlertPayload) error { // Iterate through the map to construct the remaining blocks for key, value := range data.Details { - // Split value into sentences by period followed by space + // Split value into sentences by period followed by space. sentences := strings.Split(value.(string), ". ") - formattedValue := fmt.Sprintf("• *%s*\n", key) - // Add a bullet point before each trimmed sentence + // Add the key as the header for each block. + baseFormattedValue := fmt.Sprintf("• *%s*\n", key) + currentFormattedValue := baseFormattedValue + + // Keep track of the character limit (3000) when adding sentences. for _, sentence := range sentences { - trimmedSentence := strings.TrimSpace(sentence) // Trim leading and trailing spaces + trimmedSentence := strings.TrimSpace(sentence) // Trim leading and trailing spaces. if trimmedSentence != "" { - formattedValue += fmt.Sprintf("• %s.\n", trimmedSentence) // Add period back and newline + // Add a bullet point and sentence, restoring the period and newline. + newSection := fmt.Sprintf("• %s.\n", trimmedSentence) + + // Check if adding this section exceeds the 3000-character limit. + if len(currentFormattedValue)+len(newSection) > 3000 { + // If limit exceeds, add the currentFormattedValue block to payload and start a new block. + payload.Blocks = append(payload.Blocks, + Block{ + Type: "section", + Text: &TextBlock{ + Type: "mrkdwn", + Text: currentFormattedValue, + }, + }, + Block{ + Type: "divider", + }, + ) + + // Start a new formatted value with the baseFormattedValue. + currentFormattedValue = baseFormattedValue + } + + // Append the newSection to the currentFormattedValue. + currentFormattedValue += newSection } } - payload.Blocks = append(payload.Blocks, - Block{ - Type: "section", - Text: &TextBlock{ - Type: "mrkdwn", - Text: formattedValue, + + // Add the last block if it contains any content. + if currentFormattedValue != baseFormattedValue { + payload.Blocks = append(payload.Blocks, + Block{ + Type: "section", + Text: &TextBlock{ + Type: "mrkdwn", + Text: currentFormattedValue, + }, }, - }, - Block{ - Type: "divider", - }, - ) + Block{ + Type: "divider", + }, + ) + } } // Marshal the payload to JSON @@ -163,7 +194,8 @@ func (s *SlackWebhook) SendAlert(data *AlertPayload) error { } }) if err != nil { - return fmt.Errorf("after %d retries,last error: %w", iter, err) + log.Errorw("Slack Webhook payload:", string(jsonData)) + return fmt.Errorf("after %d retries,last error: %w, %s", iter, err, string(jsonData)) } return nil } diff --git a/cmd/curio/tasks/tasks.go b/cmd/curio/tasks/tasks.go index adc06a5d9..530e61f04 100644 --- a/cmd/curio/tasks/tasks.go +++ b/cmd/curio/tasks/tasks.go @@ -102,7 +102,6 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps, shutdownChan chan machine := dependencies.ListenAddr prover := dependencies.Prover iStore := dependencies.IndexStore - pp := dependencies.SectorReader chainSched := chainsched.New(full) @@ -234,12 +233,13 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps, shutdownChan chan { // Piece handling if cfg.Subsystems.EnableParkPiece { - parkPieceTask, err := piece2.NewParkPieceTask(db, must.One(slrLazy.Val()), cfg.Subsystems.ParkPieceMaxTasks) + parkPieceTask, err := piece2.NewParkPieceTask(db, must.One(slrLazy.Val()), stor, cfg.Subsystems.ParkPieceMaxTasks) if err != nil { return nil, err } cleanupPieceTask := piece2.NewCleanupPieceTask(db, must.One(slrLazy.Val()), 0) - activeTasks = append(activeTasks, parkPieceTask, cleanupPieceTask) + aggregateChunksTask := piece2.NewAggregateChunksTask(db, stor, must.One(slrLazy.Val())) + activeTasks = append(activeTasks, parkPieceTask, cleanupPieceTask, aggregateChunksTask) } } @@ -257,21 +257,27 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps, shutdownChan chan } { + var sdeps cuhttp.ServiceDeps // Market tasks var dm *storage_market.CurioStorageDealMarket if cfg.Subsystems.EnableDealMarket { // Main market poller should run on all nodes - dm = storage_market.NewCurioStorageDealMarket(miners, db, cfg, si, full, as) + dm = storage_market.NewCurioStorageDealMarket(miners, db, cfg, must.One(dependencies.EthClient.Val()), si, full, as, must.One(slrLazy.Val())) err := dm.StartMarket(ctx) if err != nil { return nil, err } + sdeps.DealMarket = dm + if cfg.Subsystems.EnableCommP { commpTask := storage_market.NewCommpTask(dm, db, must.One(slrLazy.Val()), full, cfg.Subsystems.CommPMaxTasks) activeTasks = append(activeTasks, commpTask) } + aggTask := storage_market.NewAggregateTask(dm, db, must.One(slrLazy.Val()), lstor, full) + activeTasks = append(activeTasks, aggTask) + // PSD and Deal find task do not require many resources. They can run on all machines psdTask := storage_market.NewPSDTask(dm, db, sender, as, &cfg.Market.StorageMarketConfig.MK12, full) dealFindTask := storage_market.NewFindDealTask(dm, db, full, &cfg.Market.StorageMarketConfig.MK12) @@ -288,30 +294,48 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps, shutdownChan chan if err != nil { return nil, err } - var sdeps cuhttp.ServiceDeps if cfg.Subsystems.EnablePDP { es := getSenderEth() sdeps.EthSender = es - pdp.NewWatcherCreate(db, must.One(dependencies.EthClient.Val()), chainSched) - pdp.NewWatcherRootAdd(db, must.One(dependencies.EthClient.Val()), chainSched) + ethClient := must.One(dependencies.EthClient.Val()) + + pdp.NewWatcherDataSetCreate(db, ethClient, chainSched) + pdp.NewWatcherPieceAdd(db, chainSched, ethClient) + pdp.NewWatcherDelete(db, chainSched) + pdp.NewWatcherPieceDelete(db, chainSched) - pdpProveTask := pdp.NewProveTask(chainSched, db, must.One(dependencies.EthClient.Val()), dependencies.Chain, es, dependencies.CachedPieceReader) - pdpNextProvingPeriodTask := pdp.NewNextProvingPeriodTask(db, must.One(dependencies.EthClient.Val()), dependencies.Chain, chainSched, es) - pdpInitProvingPeriodTask := pdp.NewInitProvingPeriodTask(db, must.One(dependencies.EthClient.Val()), dependencies.Chain, chainSched, es) + pdpProveTask := pdp.NewProveTask(chainSched, db, ethClient, dependencies.Chain, es, dependencies.CachedPieceReader, iStore) + pdpNextProvingPeriodTask := pdp.NewNextProvingPeriodTask(db, ethClient, dependencies.Chain, chainSched, es) + pdpInitProvingPeriodTask := pdp.NewInitProvingPeriodTask(db, ethClient, dependencies.Chain, chainSched, es) pdpNotifTask := pdp.NewPDPNotifyTask(db) - activeTasks = append(activeTasks, pdpNotifTask, pdpProveTask, pdpNextProvingPeriodTask, pdpInitProvingPeriodTask) + + addProofSetTask := pdp.NewPDPTaskAddDataSet(db, es, ethClient, full) + pdpAddRoot := pdp.NewPDPTaskAddPiece(db, es, ethClient) + pdpDelRoot := pdp.NewPDPTaskDeletePiece(db, es, ethClient) + pdpDelProofSetTask := pdp.NewPDPTaskDeleteDataSet(db, es, ethClient, full) + + pdpAggregateTask := pdp.NewAggregatePDPDealTask(db, sc) + pdpCache := pdp.NewTaskPDPSaveCache(db, dependencies.CachedPieceReader, iStore) + commPTask := pdp.NewPDPCommpTask(db, sc, cfg.Subsystems.CommPMaxTasks) + + activeTasks = append(activeTasks, pdpNotifTask, pdpProveTask, pdpNextProvingPeriodTask, pdpInitProvingPeriodTask, commPTask, pdpAddRoot, addProofSetTask, pdpAggregateTask, pdpCache, pdpDelRoot, pdpDelProofSetTask) } idxMax := taskhelp.Max(cfg.Subsystems.IndexingMaxTasks) - indexingTask := indexing.NewIndexingTask(db, sc, iStore, pp, cfg, idxMax) - ipniTask := indexing.NewIPNITask(db, sc, iStore, pp, cfg, idxMax) - activeTasks = append(activeTasks, ipniTask, indexingTask) + indexingTask := indexing.NewIndexingTask(db, sc, iStore, dependencies.SectorReader, dependencies.CachedPieceReader, cfg, idxMax) + ipniTask := indexing.NewIPNITask(db, sc, dependencies.SectorReader, dependencies.CachedPieceReader, cfg, idxMax) + pdpIdxTask := indexing.NewPDPIndexingTask(db, sc, iStore, dependencies.CachedPieceReader, cfg, idxMax) + pdpIPNITask := indexing.NewPDPIPNITask(db, sc, dependencies.CachedPieceReader, cfg, idxMax) + activeTasks = append(activeTasks, ipniTask, indexingTask, pdpIdxTask, pdpIPNITask) if cfg.HTTP.Enable { - err = cuhttp.StartHTTPServer(ctx, dependencies, &sdeps, dm) + if !cfg.Subsystems.EnableDealMarket { + return nil, xerrors.New("deal market must be enabled on HTTP server") + } + err = cuhttp.StartHTTPServer(ctx, dependencies, &sdeps) if err != nil { return nil, xerrors.Errorf("failed to start the HTTP server: %w", err) } @@ -321,6 +345,9 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps, shutdownChan chan amTask := alertmanager.NewAlertTask(full, db, cfg.Alerting, dependencies.Al) activeTasks = append(activeTasks, amTask) + pcl := gc.NewPieceCleanupTask(db, iStore) + activeTasks = append(activeTasks, pcl) + log.Infow("This Curio instance handles", "miner_addresses", miners, "tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name })) diff --git a/cmd/pdptool/main.go b/cmd/pdptool/main.go index 15428bae6..c0699a3fa 100644 --- a/cmd/pdptool/main.go +++ b/cmd/pdptool/main.go @@ -192,6 +192,8 @@ var pingCmd = &cli.Command{ return err } + serviceURL = serviceURL + "/market" + // Append /pdp/ping to the service URL pingURL := serviceURL + "/pdp/ping" @@ -537,6 +539,7 @@ var pieceUploadCmd = &cli.Command{ } serviceURL := cctx.String("service-url") + serviceURL = serviceURL + "/market" jwtToken := cctx.String("jwt-token") notifyURL := cctx.String("notify-url") serviceName := cctx.String("service-name") @@ -692,6 +695,7 @@ var uploadFileCmd = &cli.Command{ } serviceURL := cctx.String("service-url") + serviceURL = serviceURL + "/market" jwtToken := cctx.String("jwt-token") serviceName := cctx.String("service-name") hashType := cctx.String("hash-type") @@ -889,6 +893,7 @@ var createProofSetCmd = &cli.Command{ }, Action: func(cctx *cli.Context) error { serviceURL := cctx.String("service-url") + serviceURL = serviceURL + "/market" serviceName := cctx.String("service-name") recordKeeper := cctx.String("pdp-service-contract") extraDataHexStr := cctx.String("extra-data") @@ -982,6 +987,7 @@ var getProofSetStatusCmd = &cli.Command{ }, Action: func(cctx *cli.Context) error { serviceURL := cctx.String("service-url") + serviceURL = serviceURL + "/market" serviceName := cctx.String("service-name") txHash := cctx.String("tx-hash") @@ -1091,6 +1097,7 @@ var getProofSetCmd = &cli.Command{ } serviceURL := cctx.String("service-url") + serviceURL = serviceURL + "/market" serviceName := cctx.String("service-name") // Create the JWT token @@ -1195,6 +1202,7 @@ var addRootsCmd = &cli.Command{ }, Action: func(cctx *cli.Context) error { serviceURL := cctx.String("service-url") + serviceURL = serviceURL + "/market" serviceName := cctx.String("service-name") proofSetID := cctx.Uint64("proof-set-id") rootInputs := cctx.StringSlice("root") @@ -1441,6 +1449,7 @@ var removeRootsCmd = &cli.Command{ }, Action: func(cctx *cli.Context) error { serviceURL := cctx.String("service-url") + serviceURL = serviceURL + "/market" serviceName := cctx.String("service-name") proofSetID := cctx.Uint64("proof-set-id") rootID := cctx.Uint64("root-id") diff --git a/cmd/sptool/main.go b/cmd/sptool/main.go index dd3d00e9e..6fccb4c09 100644 --- a/cmd/sptool/main.go +++ b/cmd/sptool/main.go @@ -101,5 +101,6 @@ var toolboxCmd = &cli.Command{ Subcommands: []*cli.Command{ sparkCmd, mk12Clientcmd, + mk20Clientcmd, }, } diff --git a/cmd/sptool/toolbox_deal_client.go b/cmd/sptool/toolbox_deal_client.go index 3a3e27790..9b1e5abca 100644 --- a/cmd/sptool/toolbox_deal_client.go +++ b/cmd/sptool/toolbox_deal_client.go @@ -5,10 +5,13 @@ import ( "bytes" "context" "crypto/rand" + "crypto/sha256" + "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" + "io" "net/http" "net/url" "os" @@ -29,21 +32,28 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/mitchellh/go-homedir" "github.com/multiformats/go-multiaddr" + "github.com/oklog/ulid" "github.com/urfave/cli/v2" "golang.org/x/term" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" + commp "github.com/filecoin-project/go-fil-commp-hashhash" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v16/verifreg" "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/curio/lib/commcidv2" "github.com/filecoin-project/curio/lib/keystore" + "github.com/filecoin-project/curio/lib/testutils" mk12_libp2p "github.com/filecoin-project/curio/market/libp2p" "github.com/filecoin-project/curio/market/mk12" + "github.com/filecoin-project/curio/market/mk20" + "github.com/filecoin-project/curio/market/mk20/client" - "github.com/filecoin-project/lotus/api" + lapi "github.com/filecoin-project/lotus/api" chain_types "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" lcli "github.com/filecoin-project/lotus/cli" @@ -606,7 +616,7 @@ func dealProposal(ctx context.Context, n *Node, clientAddr address.Address, root return nil, err } - sig, err := n.Wallet.WalletSign(ctx, clientAddr, buf, api.MsgMeta{Type: api.MTDealProposal}) + sig, err := n.Wallet.WalletSign(ctx, clientAddr, buf, lapi.MsgMeta{Type: lapi.MTDealProposal}) if err != nil { return nil, xerrors.Errorf("wallet sign failed: %w", err) } @@ -1300,7 +1310,7 @@ var walletSign = &cli.Command{ return err } - sig, err := n.Wallet.WalletSign(ctx, addr, msg, api.MsgMeta{Type: api.MTUnknown}) + sig, err := n.Wallet.WalletSign(ctx, addr, msg, lapi.MsgMeta{Type: lapi.MTUnknown}) if err != nil { return err } @@ -1411,7 +1421,7 @@ var dealStatusCmd = &cli.Command{ return fmt.Errorf("getting uuid bytes: %w", err) } - sig, err := n.Wallet.WalletSign(ctx, walletAddr, uuidBytes, api.MsgMeta{Type: api.MTDealProposal}) + sig, err := n.Wallet.WalletSign(ctx, walletAddr, uuidBytes, lapi.MsgMeta{Type: lapi.MTDealProposal}) if err != nil { return fmt.Errorf("signing uuid bytes: %w", err) } @@ -1573,3 +1583,952 @@ var dealStatusCmd = &cli.Command{ return nil }, } + +var mk20Clientcmd = &cli.Command{ + Name: "mk20-client", + Usage: "mk20 client for Curio", + Flags: []cli.Flag{ + mk12_client_repo, + }, + Subcommands: []*cli.Command{ + initCmd, + comm2Cmd, + mk20DealCmd, + mk20PDPDealCmd, + mk20ClientMakeAggregateCmd, + mk20ClientUploadCmd, + mk20ClientChunkUploadCmd, + mk20PDPDealStatusCmd, + }, +} + +var comm2Cmd = &cli.Command{ + Name: "commp", + Usage: "", + ArgsUsage: "", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return fmt.Errorf("usage: commP ") + } + + inPath := cctx.Args().Get(0) + + rdr, err := os.Open(inPath) + if err != nil { + return err + } + defer rdr.Close() //nolint:errcheck + + stat, err := os.Stat(inPath) + if err != nil { + return err + } + + wr := new(commp.Calc) + _, err = io.CopyBuffer(wr, rdr, make([]byte, 2<<20)) + if err != nil { + return fmt.Errorf("copy into commp writer: %w", err) + } + + digest, _, err := wr.Digest() + if err != nil { + return fmt.Errorf("generating digest failed: %w", err) + } + + commp, err := commcidv2.NewSha2CommP(uint64(stat.Size()), digest) + if err != nil { + return fmt.Errorf("computing commP failed: %w", err) + } + + fmt.Println("CommP CID: ", commp.PCidV2().String()) + fmt.Println("Car file size: ", stat.Size()) + return nil + }, +} + +var mk20DealCmd = &cli.Command{ + Name: "deal", + Usage: "Make a mk20 deal with Curio", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "http-url", + Usage: "http url to CAR file", + }, + &cli.StringSliceFlag{ + Name: "http-headers", + Usage: "http headers to be passed with the request (e.g key=value)", + }, + &cli.StringFlag{ + Name: "provider", + Usage: "storage provider on-chain address", + Required: true, + }, + &cli.StringFlag{ + Name: "pcidv2", + Usage: "pcidv2 of the CAR file", + Required: true, + }, + &cli.IntFlag{ + Name: "duration", + Usage: "duration of the deal in epochs", + Value: 518400, // default is 2880 * 180 == 180 days + }, + &cli.StringFlag{ + Name: "contract-address", + Usage: "contract address of the deal", + Required: true, + }, + &cli.StringFlag{ + Name: "contract-verify-method", + Usage: "contract verify method of the deal", + Required: true, + }, + &cli.Uint64Flag{ + Name: "allocation", + Usage: "allocation id of the deal", + }, + &cli.BoolFlag{ + Name: "indexing", + Usage: "indicates that an deal should be indexed", + Value: true, + }, + &cli.StringFlag{ + Name: "wallet", + Usage: "wallet address to be used to initiate the deal", + }, + &cli.BoolFlag{ + Name: "announce", + Usage: "indicates that deal should be announced to the IPNI(Network Indexer)", + Value: true, + }, + &cli.StringFlag{ + Name: "aggregate", + Usage: "aggregate file path for the deal", + }, + &cli.BoolFlag{ + Name: "put", + Usage: "used HTTP put as data source", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + ctx := cctx.Context + n, err := Setup(cctx.String(mk12_client_repo.Name)) + if err != nil { + return err + } + + api, closer, err := lcli.GetGatewayAPIV1(cctx) + if err != nil { + return fmt.Errorf("cant setup gateway connection: %w", err) + } + defer closer() + + walletAddr, err := n.GetProvidedOrDefaultWallet(ctx, cctx.String("wallet")) + if err != nil { + return err + } + + log.Debugw("selected wallet", "wallet", walletAddr) + + maddr, err := address.NewFromString(cctx.String("provider")) + if err != nil { + return err + } + + keyType := client.KeyFromClientAddress(walletAddr) + pkey := walletAddr.Bytes() + ts := time.Now().UTC().Truncate(time.Hour) + msg := sha256.Sum256(bytes.Join([][]byte{pkey, []byte(ts.Format(time.RFC3339))}, []byte{})) + + signature, err := n.Wallet.WalletSign(ctx, walletAddr, msg[:], lapi.MsgMeta{Type: lapi.MTDealProposal}) + if err != nil { + return xerrors.Errorf("signing message: %w", err) + } + + sig, err := signature.MarshalBinary() + if err != nil { + return xerrors.Errorf("marshaling signature: %w", err) + } + + authHeader := fmt.Sprintf("CurioAuth %s:%s:%s", + keyType, + base64.StdEncoding.EncodeToString(pkey), + base64.StdEncoding.EncodeToString(sig), + ) + + minfo, err := api.StateMinerInfo(ctx, maddr, chain_types.EmptyTSK) + if err != nil { + return err + } + if minfo.PeerId == nil { + return xerrors.Errorf("storage provider %s has no peer ID set on-chain", maddr) + } + + var maddrs []multiaddr.Multiaddr + for _, mma := range minfo.Multiaddrs { + ma, err := multiaddr.NewMultiaddrBytes(mma) + if err != nil { + return xerrors.Errorf("storage provider %s had invalid multiaddrs in their info: %w", maddr, err) + } + maddrs = append(maddrs, ma) + } + if len(maddrs) == 0 { + return xerrors.Errorf("storage provider %s has no multiaddrs set on-chain", maddr) + } + + addrInfo := &peer.AddrInfo{ + ID: *minfo.PeerId, + Addrs: maddrs, + } + + log.Debugw("found storage provider", "id", addrInfo.ID, "multiaddrs", addrInfo.Addrs, "addr", maddr) + + var hurls []*url.URL + + for _, ma := range addrInfo.Addrs { + hurl, err := maurl.ToURL(ma) + if err != nil { + return xerrors.Errorf("failed to convert multiaddr %s to URL: %w", ma, err) + } + if hurl.Scheme == "ws" { + hurl.Scheme = "http" + } + if hurl.Scheme == "wss" { + hurl.Scheme = "https" + } + log.Debugw("converted multiaddr to URL", "url", hurl, "multiaddr", ma.String()) + hurls = append(hurls, hurl) + } + + commp := cctx.String("pcidv2") + pieceCid, err := cid.Parse(commp) + if err != nil { + return xerrors.Errorf("parsing pcidv2 '%s': %w", commp, err) + } + + var headers http.Header + + for _, header := range cctx.StringSlice("http-headers") { + sp := strings.Split(header, "=") + if len(sp) != 2 { + return xerrors.Errorf("malformed http header: %s", header) + } + headers.Add(sp[0], sp[1]) + } + + var d mk20.DataSource + + if cctx.IsSet("aggregate") { + d = mk20.DataSource{ + PieceCID: pieceCid, + Format: mk20.PieceDataFormat{ + Aggregate: &mk20.FormatAggregate{ + Type: mk20.AggregateTypeV1, + }, + }, + } + + var pieces []mk20.DataSource + + log.Debugw("using aggregate data source", "aggregate", cctx.String("aggregate")) + // Read file line by line + loc, err := homedir.Expand(cctx.String("aggregate")) + if err != nil { + return err + } + file, err := os.Open(loc) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Split(line, "\t") + if len(parts) != 2 { + return fmt.Errorf("invalid line format. Expected pieceCidV2, url at %s", line) + } + if parts[0] == "" || parts[1] == "" { + return fmt.Errorf("empty column value in the input file at %s", line) + } + + pieceCid, err := cid.Parse(parts[0]) + if err != nil { + return fmt.Errorf("failed to parse CID: %w", err) + } + + url, err := url.Parse(parts[1]) + if err != nil { + return fmt.Errorf("failed to parse url: %w", err) + } + + pieces = append(pieces, mk20.DataSource{ + PieceCID: pieceCid, + Format: mk20.PieceDataFormat{ + Car: &mk20.FormatCar{}, + }, + SourceHTTP: &mk20.DataSourceHTTP{ + URLs: []mk20.HttpUrl{ + { + URL: url.String(), + Priority: 0, + Fallback: true, + }, + }, + }, + }) + + if err := scanner.Err(); err != nil { + return err + } + } + d.SourceAggregate = &mk20.DataSourceAggregate{ + Pieces: pieces, + } + } else { + if !cctx.IsSet("http-url") { + if cctx.Bool("put") { + d = mk20.DataSource{ + PieceCID: pieceCid, + Format: mk20.PieceDataFormat{ + Car: &mk20.FormatCar{}, + }, + SourceHttpPut: &mk20.DataSourceHttpPut{}, + } + } else { + d = mk20.DataSource{ + PieceCID: pieceCid, + Format: mk20.PieceDataFormat{ + Car: &mk20.FormatCar{}, + }, + SourceOffline: &mk20.DataSourceOffline{}, + } + } + } else { + url, err := url.Parse(cctx.String("http-url")) + if err != nil { + return xerrors.Errorf("parsing http url: %w", err) + } + d = mk20.DataSource{ + PieceCID: pieceCid, + Format: mk20.PieceDataFormat{ + Car: &mk20.FormatCar{}, + }, + SourceHTTP: &mk20.DataSourceHTTP{ + URLs: []mk20.HttpUrl{ + { + URL: url.String(), + Headers: headers, + Priority: 0, + Fallback: true, + }, + }, + }, + } + } + } + + p := mk20.Products{ + DDOV1: &mk20.DDOV1{ + Provider: maddr, + PieceManager: walletAddr, + Duration: abi.ChainEpoch(cctx.Int64("duration")), + ContractAddress: cctx.String("contract-address"), + ContractVerifyMethod: cctx.String("contract-verify-method"), + ContractVerifyMethodParams: []byte("test bytes"), + }, + RetrievalV1: &mk20.RetrievalV1{ + Indexing: cctx.Bool("indexing"), + AnnouncePayload: cctx.Bool("announce"), + }, + } + + if cctx.Uint64("allocation") != 0 { + alloc := verifreg.AllocationId(cctx.Uint64("allocation")) + p.DDOV1.AllocationId = &alloc + } + + id, err := mk20.NewULID() + if err != nil { + return err + } + log.Debugw("generated deal id", "id", id) + + deal := mk20.Deal{ + Identifier: id, + Client: walletAddr.String(), + Data: &d, + Products: p, + } + + log.Debugw("deal", "deal", deal) + + body, err := json.Marshal(deal) + if err != nil { + return err + } + + // Try to request all URLs one by one and exit after first success + for _, u := range hurls { + s := u.String() + "/market/mk20/store" + log.Debugw("trying to send request to", "url", u.String()) + req, err := http.NewRequest("POST", s, bytes.NewReader(body)) + if err != nil { + return xerrors.Errorf("failed to create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", authHeader) + log.Debugw("Headers", "headers", req.Header) + resp, err := http.DefaultClient.Do(req) + if err != nil { + log.Warnw("failed to send request", "url", s, "error", err) + continue + } + if resp.StatusCode != http.StatusOK { + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return xerrors.Errorf("failed to read response body: %w", err) + } + log.Warnw("failed to send request", "url", s, "status", resp.StatusCode, "body", string(respBody)) + continue + } + return nil + } + return xerrors.Errorf("failed to send request to any of the URLs") + }, +} + +var mk20ClientMakeAggregateCmd = &cli.Command{ + Name: "aggregate", + Usage: "Create a new aggregate from a list of CAR files", + Flags: []cli.Flag{ + &cli.StringSliceFlag{ + Name: "files", + Usage: "list of CAR files to aggregate", + Required: true, + }, + &cli.Uint64Flag{ + Name: "piece-size", + Usage: "piece size of the aggregate", + Required: true, + }, + &cli.BoolFlag{ + Name: "out", + Usage: "output the aggregate file", + Value: true, + }, + }, + Action: func(cctx *cli.Context) error { + size := abi.PaddedPieceSize(cctx.Uint64("piece-size")) + files := cctx.StringSlice("files") + out := cctx.Bool("out") + pcid, err := testutils.CreateAggregateFromCars(files, size, out) + if err != nil { + return err + } + fmt.Println("CommP CID: ", pcid.String()) + return nil + }, +} + +var mk20ClientChunkUploadCmd = &cli.Command{ + Name: "chunk-upload", + Usage: "Upload a file in chunks to the storage provider", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "provider", + Usage: "storage provider on-chain address", + Required: true, + }, + &cli.StringFlag{ + Name: "deal", + Usage: "deal id to upload to", + Required: true, + }, + &cli.StringFlag{ + Name: "chunk-size", + Usage: "chunk size to be used for the upload", + Value: "4 MiB", + }, + &cli.StringFlag{ + Name: "wallet", + Usage: "wallet address to be used to initiate the deal", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := cctx.Context + n, err := Setup(cctx.String(mk12_client_repo.Name)) + if err != nil { + return err + } + + walletAddr, err := n.GetProvidedOrDefaultWallet(ctx, cctx.String("wallet")) + if err != nil { + return err + } + + maddr, err := url.Parse(cctx.String("provider")) + if err != nil { + return err + } + + pclient := client.NewClient(maddr.String(), walletAddr, n.Wallet) + + log.Debugw("selected wallet", "wallet", walletAddr) + + if cctx.NArg() != 1 { + return xerrors.Errorf("must provide a single file to upload") + } + + file := cctx.Args().First() + log.Debugw("uploading file", "file", file) + + chunkSizeStr := cctx.String("chunk-size") + chunkSizem, err := humanize.ParseBytes(chunkSizeStr) + if err != nil { + return xerrors.Errorf("parsing chunk size: %w", err) + } + + if chunkSizem == 0 { + return xerrors.Errorf("invalid chunk size: %s", chunkSizeStr) + } + + // Verify chunk size is power of 2 + if chunkSizem&(chunkSizem-1) != 0 { + return xerrors.Errorf("chunk size must be power of 2") + } + + chunkSize := int64(chunkSizem) + + dealid, err := ulid.Parse(cctx.String("deal")) + if err != nil { + return xerrors.Errorf("parsing deal id: %w", err) + } + + f, err := os.OpenFile(file, os.O_RDONLY, 0644) + if err != nil { + return xerrors.Errorf("opening file: %w", err) + } + + defer func() { + _ = f.Close() + }() + + stat, err := f.Stat() + if err != nil { + return xerrors.Errorf("stat file: %w", err) + } + + size := stat.Size() + if size == 0 { + return xerrors.Errorf("file size is 0") + } + + if size < chunkSize { + chunkSize = size + } + + err = pclient.DealChunkedUpload(ctx, dealid.String(), size, chunkSize, f) + if err != nil { + return xerrors.Errorf("uploading file: %w", err) + } + + return nil + }, +} + +var mk20PDPDealCmd = &cli.Command{ + Name: "pdp-deal", + Usage: "Make a mk20 PDP deal with Curio", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "http-url", + Usage: "http url to CAR file", + }, + &cli.StringSliceFlag{ + Name: "http-headers", + Usage: "http headers to be passed with the request (e.g key=value)", + }, + &cli.StringFlag{ + Name: "provider", + Usage: "PDP providers's URL", + Required: true, + }, + &cli.StringFlag{ + Name: "pcidv2", + Usage: "pcidv2 of the CAR file", + }, + &cli.StringFlag{ + Name: "wallet", + Usage: "wallet address to be used to initiate the deal", + }, + &cli.StringFlag{ + Name: "aggregate", + Usage: "aggregate file path for the deal", + }, + &cli.BoolFlag{ + Name: "put", + Usage: "used HTTP put as data source", + }, + &cli.BoolFlag{ + Name: "add-piece", + Usage: "add piece", + }, + &cli.BoolFlag{ + Name: "add-dataset", + Usage: "add dataset", + }, + &cli.BoolFlag{ + Name: "remove-piece", + Usage: "remove piece", + }, + &cli.BoolFlag{ + Name: "remove-dataset", + Usage: "remove dataset", + }, + &cli.StringFlag{ + Name: "record-keeper", + Usage: "record keeper address", + }, + &cli.Uint64SliceFlag{ + Name: "piece-id", + Usage: "root IDs", + }, + &cli.Uint64Flag{ + Name: "dataset-id", + Usage: "dataset IDs", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := cctx.Context + n, err := Setup(cctx.String(mk12_client_repo.Name)) + if err != nil { + return err + } + + walletAddr, err := n.GetProvidedOrDefaultWallet(ctx, cctx.String("wallet")) + if err != nil { + return err + } + + maddr, err := url.Parse(cctx.String("provider")) + if err != nil { + return err + } + + pclient := client.NewClient(maddr.String(), walletAddr, n.Wallet) + + log.Debugw("selected wallet", "wallet", walletAddr) + + addRoot := cctx.Bool("add-piece") + addProofset := cctx.Bool("add-dataset") + removeRoot := cctx.Bool("remove-piece") + removeProofset := cctx.Bool("remove-dataset") + recordKeeper := cctx.String("record-keeper") + rootIDs := cctx.Uint64Slice("piece-id") + proofSetSet := cctx.IsSet("dataset-id") + proofsetID := cctx.Uint64("dataset-id") + if !addRoot && !removeRoot && !addProofset && !removeProofset { + return xerrors.Errorf("at least one of --add-root, --remove-root, --add-proofset, --remove-proofset must be set") + } + + if btoi(addRoot)+btoi(addProofset)+btoi(removeRoot)+btoi(removeProofset) > 1 { + return xerrors.Errorf("only one of --add-root, --remove-root, --add-proofset, --remove-proofset can be set") + } + + if addRoot { + commp := cctx.String("pcidv2") + pieceCid, err := cid.Parse(commp) + if err != nil { + return xerrors.Errorf("parsing pcidv2 '%s': %w", commp, err) + } + + var headers http.Header + + for _, header := range cctx.StringSlice("http-headers") { + sp := strings.Split(header, "=") + if len(sp) != 2 { + return xerrors.Errorf("malformed http header: %s", header) + } + headers.Add(sp[0], sp[1]) + } + + if cctx.IsSet("aggregate") { + var pieces []mk20.DataSource + + log.Debugw("using aggregate data source", "aggregate", cctx.String("aggregate")) + // Read file line by line + loc, err := homedir.Expand(cctx.String("aggregate")) + if err != nil { + return err + } + file, err := os.Open(loc) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Split(line, "\t") + if len(parts) != 2 { + return fmt.Errorf("invalid line format. Expected pieceCidV2, url at %s", line) + } + if parts[0] == "" || parts[1] == "" { + return fmt.Errorf("empty column value in the input file at %s", line) + } + + pieceCid, err := cid.Parse(parts[0]) + if err != nil { + return fmt.Errorf("failed to parse CID: %w", err) + } + + url, err := url.Parse(parts[1]) + if err != nil { + return fmt.Errorf("failed to parse url: %w", err) + } + + pieces = append(pieces, mk20.DataSource{ + PieceCID: pieceCid, + Format: mk20.PieceDataFormat{ + Car: &mk20.FormatCar{}, + }, + SourceHTTP: &mk20.DataSourceHTTP{ + URLs: []mk20.HttpUrl{ + { + URL: url.String(), + Priority: 0, + Fallback: true, + }, + }, + }, + }) + + if err := scanner.Err(); err != nil { + return err + } + } + id, err := pclient.AddPieceWithAggregate(ctx, walletAddr.String(), recordKeeper, nil, &proofsetID, pieceCid, true, false, mk20.AggregateTypeNone, pieces) + if err != nil { + return xerrors.Errorf("failed to add piece: %w", err) + } + fmt.Println("Add piece requested with Deal:", id) + return nil + } else { + if !cctx.IsSet("http-url") { + id, err := pclient.AddPieceWithPut(ctx, walletAddr.String(), recordKeeper, nil, &proofsetID, pieceCid, true, false, true, true, mk20.AggregateTypeNone, nil) + if err != nil { + return xerrors.Errorf("failed to add piece: %w", err) + } + fmt.Println("Add piece requested with Deal:", id) + return nil + } else { + url, err := url.Parse(cctx.String("http-url")) + if err != nil { + return xerrors.Errorf("parsing http url: %w", err) + } + h := []mk20.HttpUrl{ + { + URL: url.String(), + Headers: headers, + Priority: 0, + Fallback: true, + }, + } + id, err := pclient.AddPieceWithHTTP(ctx, walletAddr.String(), recordKeeper, nil, &proofsetID, pieceCid, true, false, true, true, mk20.AggregateTypeNone, nil, h) + if err != nil { + return xerrors.Errorf("failed to add piece: %w", err) + } + fmt.Println("Add piece requested with Deal:", id) + return nil + } + } + } + + if removeRoot { + if !proofSetSet { + return xerrors.Errorf("proofset-id must be set when removing a root") + } + id, err := pclient.RemovePiece(ctx, walletAddr.String(), recordKeeper, nil, &proofsetID, rootIDs) + if err != nil { + return xerrors.Errorf("failed to remove piece: %w", err) + } + fmt.Println("Piece removal requested with deal ID:", id.String()) + return nil + } + + if addProofset { + id, err := pclient.CreateDataSet(ctx, walletAddr.String(), recordKeeper, nil) + if err != nil { + return xerrors.Errorf("failed to create dataset: %w", err) + } + fmt.Println("Dataset creation requested with deal ID:", id.String()) + return nil + } + + if removeProofset { + if !proofSetSet { + return xerrors.Errorf("proofset-id must be set when deleting proof-set") + } + id, err := pclient.RemoveDataSet(ctx, walletAddr.String(), recordKeeper, nil, &proofsetID) + if err != nil { + return xerrors.Errorf("failed to remove dataset: %w", err) + } + fmt.Println("Dataset removal requested with deal ID:", id.String()) + return nil + } + + return xerrors.Errorf("failed to send a PDP deal") + }, +} + +func btoi(b bool) int { + if b { + return 1 + } + return 0 +} + +var mk20ClientUploadCmd = &cli.Command{ + Name: "upload", + Usage: "Upload a file to the storage provider", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "provider", + Usage: "PDP providers's URL", + Required: true, + }, + &cli.StringFlag{ + Name: "deal", + Usage: "deal id to upload to", + Required: true, + }, + }, + Action: func(cctx *cli.Context) error { + ctx := cctx.Context + n, err := Setup(cctx.String(mk12_client_repo.Name)) + if err != nil { + return err + } + + walletAddr, err := n.GetProvidedOrDefaultWallet(ctx, cctx.String("wallet")) + if err != nil { + return err + } + + maddr, err := url.Parse(cctx.String("provider")) + if err != nil { + return err + } + + pclient := client.NewClient(maddr.String(), walletAddr, n.Wallet) + + log.Debugw("selected wallet", "wallet", walletAddr) + + if cctx.NArg() != 1 { + return xerrors.Errorf("must provide a single file to upload") + } + file := cctx.Args().First() + log.Debugw("uploading file", "file", file) + + dealid, err := ulid.Parse(cctx.String("deal")) + if err != nil { + return xerrors.Errorf("parsing deal id: %w", err) + } + + f, err := os.OpenFile(file, os.O_RDONLY, 0644) + if err != nil { + return xerrors.Errorf("opening file: %w", err) + } + + defer func() { + _ = f.Close() + }() + + stat, err := f.Stat() + if err != nil { + return xerrors.Errorf("stat file: %w", err) + } + + size := stat.Size() + if size == 0 { + return xerrors.Errorf("file size is 0") + } + + err = pclient.DealUploadSerial(ctx, dealid.String(), f) + if err != nil { + return xerrors.Errorf("uploading file: %w", err) + } + + err = pclient.DealUploadSerialFinalize(ctx, dealid.String(), nil) + if err != nil { + return xerrors.Errorf("finalizing the upload: %w", err) + } + + return nil + }, +} + +var mk20PDPDealStatusCmd = &cli.Command{ + Name: "deal-status", + Usage: "Get status of a Mk20 deal", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "provider", + Usage: "PDP providers's URL", + Required: true, + }, + &cli.StringFlag{ + Name: "id", + Usage: "deal id", + Required: true, + }, + &cli.StringFlag{ + Name: "wallet", + Usage: "wallet address to be used to initiate the deal", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := cctx.Context + n, err := Setup(cctx.String(mk12_client_repo.Name)) + if err != nil { + return err + } + + walletAddr, err := n.GetProvidedOrDefaultWallet(ctx, cctx.String("wallet")) + if err != nil { + return err + } + + maddr, err := url.Parse(cctx.String("provider")) + if err != nil { + return err + } + + pclient := client.NewClient(maddr.String(), walletAddr, n.Wallet) + + status, err := pclient.DealStatus(ctx, cctx.String("id")) + if err != nil { + return xerrors.Errorf("getting deal status: %w", err) + } + + if status.PDPV1 != nil { + fmt.Println("PDP Status:") + fmt.Println("State:", status.PDPV1.State) + fmt.Println("Error:", status.PDPV1.ErrorMsg) + } + + if status.DDOV1 != nil { + fmt.Println("PDP Status:") + fmt.Println("State:", status.DDOV1.State) + fmt.Println("Error:", status.DDOV1.ErrorMsg) + } + + return nil + }, +} diff --git a/cmd/sptool/toolbox_deal_tools.go b/cmd/sptool/toolbox_deal_tools.go index fcaf44281..b8dcddaf6 100644 --- a/cmd/sptool/toolbox_deal_tools.go +++ b/cmd/sptool/toolbox_deal_tools.go @@ -10,7 +10,6 @@ import ( "path" "strconv" "strings" - "time" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil/cidenc" @@ -225,7 +224,9 @@ var commpCmd = &cli.Command{ if err != nil { return err } - defer rdr.Close() //nolint:errcheck + defer func() { + _ = rdr.Close() + }() w := &writer.Writer{} _, err = io.CopyBuffer(w, rdr, make([]byte, writer.CommPBuf)) @@ -286,7 +287,7 @@ var generateRandCar = &cli.Command{ cs := cctx.Int64("chunksize") ml := cctx.Int("maxlinks") - rf, err := testutils.CreateRandomFile(outPath, time.Now().Unix(), size) + rf, err := testutils.CreateRandomTmpFile(outPath, size) if err != nil { return err } diff --git a/cuhttp/server.go b/cuhttp/server.go index ca092d3d9..c315f6f75 100644 --- a/cuhttp/server.go +++ b/cuhttp/server.go @@ -25,7 +25,6 @@ import ( ipni_provider "github.com/filecoin-project/curio/market/ipni/ipni-provider" "github.com/filecoin-project/curio/market/libp2p" "github.com/filecoin-project/curio/market/retrieval" - "github.com/filecoin-project/curio/pdp" "github.com/filecoin-project/curio/tasks/message" storage_market "github.com/filecoin-project/curio/tasks/storage-market" ) @@ -52,11 +51,11 @@ func secureHeaders(csp string) func(http.Handler) http.Handler { case "off": // Do nothing case "self": - w.Header().Set("Content-Security-Policy", "default-src 'self'") + w.Header().Set("Content-Security-Policy", "default-src 'self'; img-src 'self' data: blob:") case "inline": fallthrough default: - w.Header().Set("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'") + w.Header().Set("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; img-src 'self' data: blob:") } next.ServeHTTP(w, r) @@ -137,10 +136,11 @@ func isWebSocketUpgrade(r *http.Request) bool { } type ServiceDeps struct { - EthSender *message.SenderETH + EthSender *message.SenderETH + DealMarket *storage_market.CurioStorageDealMarket } -func StartHTTPServer(ctx context.Context, d *deps.Deps, sd *ServiceDeps, dm *storage_market.CurioStorageDealMarket) error { +func StartHTTPServer(ctx context.Context, d *deps.Deps, sd *ServiceDeps) error { cfg := d.Cfg.HTTP // Setup the Chi router for more complex routing (if needed in the future) @@ -182,7 +182,9 @@ func StartHTTPServer(ctx context.Context, d *deps.Deps, sd *ServiceDeps, dm *sto _, _ = fmt.Fprintf(w, "Service is up and running") }) - chiRouter, err = attachRouters(ctx, chiRouter, d, sd, dm) + // TODO: Attach a info page here with details about all the service and endpoints + + chiRouter, err = attachRouters(ctx, chiRouter, d, sd) if err != nil { return xerrors.Errorf("failed to attach routers: %w", err) } @@ -276,7 +278,7 @@ func (c cache) Delete(ctx context.Context, key string) error { var _ autocert.Cache = cache{} -func attachRouters(ctx context.Context, r *chi.Mux, d *deps.Deps, sd *ServiceDeps, dm *storage_market.CurioStorageDealMarket) (*chi.Mux, error) { +func attachRouters(ctx context.Context, r *chi.Mux, d *deps.Deps, sd *ServiceDeps) (*chi.Mux, error) { // Attach retrievals rp := retrieval.NewRetrievalProvider(ctx, d.DB, d.IndexStore, d.CachedPieceReader) retrieval.Router(r, rp) @@ -294,13 +296,13 @@ func attachRouters(ctx context.Context, r *chi.Mux, d *deps.Deps, sd *ServiceDep rd := libp2p.NewRedirector(d.DB) libp2p.Router(r, rd) - if sd.EthSender != nil { - pdsvc := pdp.NewPDPService(d.DB, d.LocalStore, must.One(d.EthClient.Get()), d.Chain, sd.EthSender) - pdp.Routes(r, pdsvc) - } + //if sd.EthSender != nil { + // pdsvc := pdp.NewPDPService(d.DB, d.LocalStore, must.One(d.EthClient.Get()), d.Chain, sd.EthSender) + // pdp.Routes(r, pdsvc) + //} // Attach the market handler - dh, err := mhttp.NewMarketHandler(d.DB, d.Cfg, dm) + dh, err := mhttp.NewMarketHandler(d.DB, d.Cfg, sd.DealMarket, must.One(d.EthClient.Get()), d.Chain, sd.EthSender, d.LocalStore) if err != nil { return nil, xerrors.Errorf("failed to create new market handler: %w", err) } diff --git a/deps/config/doc_gen.go b/deps/config/doc_gen.go index 67848dd3b..53de761e0 100644 --- a/deps/config/doc_gen.go +++ b/deps/config/doc_gen.go @@ -1095,6 +1095,71 @@ CIDGravity filters will not be applied to deals associated with that miner ID.`, Default behaviors is to reject the deals (Default: false)`, }, }, + "MK20Config": { + { + Name: "ExpectedPoRepSealDuration", + Type: "time.Duration", + + Comment: `ExpectedPoRepSealDuration is the expected time it would take to seal the deal sector +This will be used to fail the deals which cannot be sealed on time. +Time duration string (e.g., "1h2m3s") in TOML format. (Default: "8h0m0s")`, + }, + { + Name: "ExpectedSnapSealDuration", + Type: "time.Duration", + + Comment: `ExpectedSnapSealDuration is the expected time it would take to snap the deal sector +This will be used to fail the deals which cannot be sealed on time. +Time duration string (e.g., "1h2m3s") in TOML format. (Default: "2h0m0s")`, + }, + { + Name: "SkipCommP", + Type: "bool", + + Comment: `SkipCommP can be used to skip doing a commP check before PublishDealMessage is sent on chain +Warning: If this check is skipped and there is a commP mismatch, all deals in the +sector will need to be sent again (Default: false)`, + }, + { + Name: "DisabledMiners", + Type: "[]string", + + Comment: `DisabledMiners is a list of miner addresses that should be excluded from online deal making protocols`, + }, + { + Name: "MaxConcurrentDealSizeGiB", + Type: "int64", + + Comment: `MaxConcurrentDealSizeGiB is a sum of all size of all deals which are waiting to be added to a sector +When the cumulative size of all deals in process reaches this number, new deals will be rejected. +(Default: 0 = unlimited)`, + }, + { + Name: "DenyUnknownClients", + Type: "bool", + + Comment: `DenyUnknownClients determines the default behaviour for the deal of clients which are not in allow/deny list +If True then all deals coming from unknown clients will be rejected. (Default: false)`, + }, + { + Name: "MaxParallelChunkUploads", + Type: "int", + + Comment: `MaxParallelChunkUploads defines the maximum number of upload operations that can run in parallel. (Default: 512)`, + }, + { + Name: "MinimumChunkSize", + Type: "int64", + + Comment: `MinimumChunkSize defines the smallest size of a chunk allowed for processing, expressed in bytes. Must be a power of 2. (Default: 16 MiB)`, + }, + { + Name: "MaximumChunkSize", + Type: "int64", + + Comment: `MaximumChunkSize defines the maximum size of a chunk allowed for processing, expressed in bytes. Must be a power of 2. (Default: 256 MiB)`, + }, + }, "MarketConfig": { { Name: "StorageMarketConfig", @@ -1199,6 +1264,12 @@ Example: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXX Comment: `MK12 encompasses all configuration related to deal protocol mk1.2.0 and mk1.2.1 (i.e. Boost deals)`, }, + { + Name: "MK20", + Type: "MK20Config", + + Comment: `MK20 encompasses all configuration related to deal protocol mk2.0 i.e. market 2.0`, + }, { Name: "IPNI", Type: "IPNIConfig", diff --git a/deps/config/types.go b/deps/config/types.go index f46d81a0a..f4de3605e 100644 --- a/deps/config/types.go +++ b/deps/config/types.go @@ -112,6 +112,13 @@ func DefaultCurioConfig() *CurioConfig { ExpectedSnapSealDuration: 2 * time.Hour, CIDGravityTokens: []string{}, }, + MK20: MK20Config{ + ExpectedPoRepSealDuration: 8 * time.Hour, + ExpectedSnapSealDuration: 2 * time.Hour, + MaxParallelChunkUploads: 512, + MinimumChunkSize: 16 * 1024 * 1024, // 16 MiB + MaximumChunkSize: 256 * 1024 * 1024, // 256 MiB + }, IPNI: IPNIConfig{ ServiceURL: []string{"https://cid.contact"}, DirectAnnounceURLs: []string{"https://cid.contact/ingest/announce"}, @@ -122,7 +129,7 @@ func DefaultCurioConfig() *CurioConfig { DomainName: "", ListenAddress: "0.0.0.0:12310", ReadTimeout: time.Second * 10, - IdleTimeout: time.Minute * 2, + IdleTimeout: time.Hour, ReadHeaderTimeout: time.Second * 5, EnableCORS: true, CSP: "inline", @@ -727,6 +734,9 @@ type StorageMarketConfig struct { // MK12 encompasses all configuration related to deal protocol mk1.2.0 and mk1.2.1 (i.e. Boost deals) MK12 MK12Config + // MK20 encompasses all configuration related to deal protocol mk2.0 i.e. market 2.0 + MK20 MK20Config + // IPNI configuration for ipni-provider IPNI IPNIConfig @@ -907,3 +917,41 @@ type MK12CollateralConfig struct { // Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "20 FIL") CollateralHighThreshold types.FIL } + +type MK20Config struct { + // ExpectedPoRepSealDuration is the expected time it would take to seal the deal sector + // This will be used to fail the deals which cannot be sealed on time. + // Time duration string (e.g., "1h2m3s") in TOML format. (Default: "8h0m0s") + ExpectedPoRepSealDuration time.Duration + + // ExpectedSnapSealDuration is the expected time it would take to snap the deal sector + // This will be used to fail the deals which cannot be sealed on time. + // Time duration string (e.g., "1h2m3s") in TOML format. (Default: "2h0m0s") + ExpectedSnapSealDuration time.Duration + + // SkipCommP can be used to skip doing a commP check before PublishDealMessage is sent on chain + // Warning: If this check is skipped and there is a commP mismatch, all deals in the + // sector will need to be sent again (Default: false) + SkipCommP bool + + // DisabledMiners is a list of miner addresses that should be excluded from online deal making protocols + DisabledMiners []string + + // MaxConcurrentDealSizeGiB is a sum of all size of all deals which are waiting to be added to a sector + // When the cumulative size of all deals in process reaches this number, new deals will be rejected. + // (Default: 0 = unlimited) + MaxConcurrentDealSizeGiB int64 + + // DenyUnknownClients determines the default behaviour for the deal of clients which are not in allow/deny list + // If True then all deals coming from unknown clients will be rejected. (Default: false) + DenyUnknownClients bool + + // MaxParallelChunkUploads defines the maximum number of upload operations that can run in parallel. (Default: 512) + MaxParallelChunkUploads int + + // MinimumChunkSize defines the smallest size of a chunk allowed for processing, expressed in bytes. Must be a power of 2. (Default: 16 MiB) + MinimumChunkSize int64 + + // MaximumChunkSize defines the maximum size of a chunk allowed for processing, expressed in bytes. Must be a power of 2. (Default: 256 MiB) + MaximumChunkSize int64 +} diff --git a/deps/deps.go b/deps/deps.go index f23917927..8b43f55da 100644 --- a/deps/deps.go +++ b/deps/deps.go @@ -5,7 +5,6 @@ import ( "bytes" "context" "crypto/rand" - "database/sql" "encoding/base64" "errors" "fmt" @@ -23,6 +22,7 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/samber/lo" "github.com/urfave/cli/v2" + "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -377,7 +377,8 @@ Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, dbHost = cctx.String("db-host") } - deps.IndexStore, err = indexstore.NewIndexStore(strings.Split(dbHost, ","), cctx.Int("db-cassandra-port"), deps.Cfg) + deps.IndexStore = indexstore.NewIndexStore(strings.Split(dbHost, ","), cctx.Int("db-cassandra-port"), deps.Cfg) + err = deps.IndexStore.Start(cctx.Context, false) if err != nil { return xerrors.Errorf("failed to start index store: %w", err) } @@ -389,7 +390,7 @@ Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, if deps.CachedPieceReader == nil { ppr := pieceprovider.NewPieceParkReader(deps.Stor, deps.Si) - deps.CachedPieceReader = cachedreader.NewCachedPieceReader(deps.DB, deps.SectorReader, ppr) + deps.CachedPieceReader = cachedreader.NewCachedPieceReader(deps.DB, deps.SectorReader, ppr, deps.IndexStore) } if deps.ServeChunker == nil { @@ -447,7 +448,7 @@ func GetConfig(ctx context.Context, layers []string, db *harmonydb.DB) (*config. text := "" err := db.QueryRow(ctx, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) if err != nil { - if strings.Contains(err.Error(), sql.ErrNoRows.Error()) { + if strings.Contains(err.Error(), pgx.ErrNoRows.Error()) { return nil, fmt.Errorf("missing layer '%s' ", layer) } if layer == "base" { @@ -479,7 +480,7 @@ func updateBaseLayer(ctx context.Context, db *harmonydb.DB) error { text := "" err = tx.QueryRow(`SELECT config FROM harmony_config WHERE title=$1`, "base").Scan(&text) if err != nil { - if strings.Contains(err.Error(), sql.ErrNoRows.Error()) { + if strings.Contains(err.Error(), pgx.ErrNoRows.Error()) { return false, fmt.Errorf("missing layer 'base' ") } return false, fmt.Errorf("could not read layer 'base': %w", err) diff --git a/docker/piece-server/sample/mk20-aggregate-car.sh b/docker/piece-server/sample/mk20-aggregate-car.sh new file mode 100755 index 000000000..ae2935415 --- /dev/null +++ b/docker/piece-server/sample/mk20-aggregate-car.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash +set -e + +# ANSI escape codes for styling +ci="\e[3m" +cn="\e[0m" + +# Parameters for file generation +chunks=512 +links=8 +output_dir="/var/lib/curio-client/data/" +size=99700 +num_files=63 +piece_size=$((8 * 1024 * 1024)) # 8 MiB + +# Array to store generated CAR files +declare -a car_files + +# Step 1: Generate all files +echo "Generating $num_files random CAR files (size: $size bytes):" +for i in $(seq 1 "$num_files"); do + echo "Generating file $i..." + output=$(sptool --actor t01000 toolbox mk12-client generate-rand-car -c=$chunks -l=$links -s=$size "$output_dir" 2>&1) + car_file=$(echo "$output" | awk '{print $NF}') + new_car_file="${car_file%.car}" + mv "$car_file" "$new_car_file" + car_file="$new_car_file" + + if [[ -n "$car_file" ]]; then + car_files+=("$car_file") + echo "File $i generated: $car_file" + else + echo "Error: Failed to generate file $i" >&2 + exit 1 + fi +done + +if [[ ${#car_files[@]} -eq 0 ]]; then + echo "Error: No files were generated. Exiting." >&2 + exit 1 +fi + + +# Declare the base command and arguments +base_command="sptool --actor t01000 toolbox mk20-client aggregate --piece-size=$piece_size" + +# Append --file arguments for each file in the car_files array +for car_file in "${car_files[@]}"; do + base_command+=" --files=$car_file" +done + +# Debugging: Print the full constructed command +printf "${ci}%s\n\n${cn}" "$base_command" + +# Execute the constructed command +aggregate_output=$($base_command 2>&1) + +echo "$aggregate_output" + +# Step 3: Extract `CommP CID` and `Piece size` from the aggregate output +commp_cid=$(echo "$aggregate_output" | awk -F': ' '/CommP CID/ {print $2}' | xargs) + +# Validate that we got proper output +if [[ -z "$commp_cid" ]]; then + echo "Error: Failed to extract CommP CID from aggregation output" >&2 + exit 1 +fi + +# Step 4: Check and display the aggregate file +aggregate_file="aggregate_${commp_cid}" +if [[ -f "$aggregate_file" ]]; then + echo "Aggregate file stored at: $aggregate_file" + echo "Content of $aggregate_file:" + cat "$aggregate_file" +else + echo "Error: Aggregate file $aggregate_file not found!" >&2 +fi + +# Step 5: Print Results +echo -e "\n${ci}Aggregation Results:${cn}" +echo "CommP CID: $commp_cid" + + +miner_actor=$(lotus state list-miners | grep -v t01000) + +################################################################################### +printf "${ci}sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor \ +--pcidv2=$commp_cid --contract-address 0xtest --contract-verify-method test \ +--aggregate "$aggregate_file"\n\n${cn}" + +sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor --pcidv2=$commp_cid --contract-address 0xtest --contract-verify-method test --aggregate "$aggregate_file" + +echo -e "\nDone!" \ No newline at end of file diff --git a/docker/piece-server/sample/mk20-ddo.sh b/docker/piece-server/sample/mk20-ddo.sh new file mode 100755 index 000000000..406aaf78c --- /dev/null +++ b/docker/piece-server/sample/mk20-ddo.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +set -e + +ci="\e[3m" +cn="\e[0m" + +chunks="${1:-51200}" +links="${2:-100}" + +printf "${ci}sptool --actor t01000 toolbox mk12-client generate-rand-car -c=$chunks -l=$links -s=5120000 /var/lib/curio-client/data/ | awk '{print $NF}'\n\n${cn}" + +FILE=`sptool --actor t01000 toolbox mk12-client generate-rand-car -c=$chunks -l=$links -s=5120000 /var/lib/curio-client/data/ | awk '{print $NF}'` +PAYLOAD_CID=$(find "$FILE" | xargs -I{} basename {} | sed 's/\.car//') + +read COMMP_CID PIECE CAR < <(sptool --actor t01000 toolbox mk12-client commp $FILE 2>/dev/null | awk -F': ' '/CID/ {cid=$2} /Piece/ {piece=$2} /Car/ {car=$2} END {print cid, piece, car}') +miner_actor=$(lotus state list-miners | grep -v t01000) + +mv /var/lib/curio-client/data/$PAYLOAD_CID.car /var/lib/curio-client/data/$COMMP_CID + +sptool --actor t01000 toolbox mk12-client allocate -y -p $miner_actor --piece-cid $COMMP_CID --piece-size $PIECE --confidence 0 + +CLIENT=$(sptool --actor t01000 toolbox mk12-client wallet default) + +ALLOC=$(sptool --actor t01000 toolbox mk12-client list-allocations -j | jq -r --arg cid "$COMMP_CID" '.allocations | to_entries[] | select(.value.Data["/"] == $cid) | .key') + +printf "${ci}sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor \ +--http-url=http://piece-server:12320/pieces?id=$PAYLOAD_CID \ +--commp=$COMMP_CID --car-size=$CAR --piece-size=$PIECE \ +--contract-address 0xtest --contract-verify-method test --allocation=$ALLOC\n\n${cn}" + +sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor --http-url=http://piece-server:12320/pieces?id=$PAYLOAD_CID --commp=$COMMP_CID --car-size=$CAR --piece-size=$PIECE --contract-address 0xtest --contract-verify-method test --allocation $ALLOC \ No newline at end of file diff --git a/docker/piece-server/sample/mk20-random-deal.sh b/docker/piece-server/sample/mk20-random-deal.sh new file mode 100755 index 000000000..51a43f4ce --- /dev/null +++ b/docker/piece-server/sample/mk20-random-deal.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +set -e + +ci="\e[3m" +cn="\e[0m" + + +put="${1:-false}" +offline="${2:-false}" +chunks="${3:-51200}" +links="${4:-100}" + +printf "${ci}sptool --actor t01000 toolbox mk12-client generate-rand-car -c=$chunks -l=$links -s=5120000 /var/lib/curio-client/data/ | awk '{print $NF}'\n\n${cn}" +FILE=`sptool --actor t01000 toolbox mk12-client generate-rand-car -c=$chunks -l=$links -s=5120000 /var/lib/curio-client/data/ | awk '{print $NF}'` +read COMMP_CID PIECE CAR < <(sptool --actor t01000 toolbox mk20-client commp $FILE 2>/dev/null | awk -F': ' '/CID/ {cid=$2} /Piece/ {piece=$2} /Car/ {car=$2} END {print cid, piece, car}') + +mv $FILE /var/lib/curio-client/data/$COMMP_CID + +miner_actor=$(lotus state list-miners | grep -v t01000) + +if [ "$put" == "true" ]; then + ################################################################################### + printf "${ci}sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor \ + --pcidv2=$COMMP_CID \ + --contract-address 0xtest --contract-verify-method test --put\n\n${cn}" + + sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor --pcidv2=$COMMP_CID --contract-address 0xtest --contract-verify-method test --put + +else + + if [ "$offline" == "true" ]; then + + ################################################################################### + printf "${ci}sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor \ + --pcidv2=$COMMP_CID \ + --contract-address 0xtest --contract-verify-method test\n\n${cn}" + + sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor --pcidv2=$COMMP_CID --contract-address 0xtest --contract-verify-method test + + else + ################################################################################### + printf "${ci}sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor \ + --http-url=http://piece-server:12320/pieces?id=$COMMP_CID \ + --pcidv2=$COMMP_CID \ + --contract-address 0xtest --contract-verify-method test\n\n${cn}" + + sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor --http-url=http://piece-server:12320/pieces?id=$COMMP_CID --pcidv2=$COMMP_CID --contract-address 0xtest --contract-verify-method test + + fi + +fi + + diff --git a/docker/piece-server/sample/pdp.sh b/docker/piece-server/sample/pdp.sh new file mode 100755 index 000000000..aee058635 --- /dev/null +++ b/docker/piece-server/sample/pdp.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +set -e + +ci="\e[3m" +cn="\e[0m" + + +put="${1:-false}" +offline="${2:-false}" +chunks="${3:-51200}" +links="${4:-100}" + +printf "${ci}sptool --actor t01000 toolbox mk12-client generate-rand-car -c=$chunks -l=$links -s=5120000 /var/lib/curio-client/data/ | awk '{print $NF}'\n\n${cn}" +FILE=`sptool --actor t01000 toolbox mk12-client generate-rand-car -c=$chunks -l=$links -s=5120000 /var/lib/curio-client/data/ | awk '{print $NF}'` +read COMMP_CID PIECE CAR < <(sptool --actor t01000 toolbox mk20-client commp $FILE 2>/dev/null | awk -F': ' '/CID/ {cid=$2} /Piece/ {piece=$2} /Car/ {car=$2} END {print cid, piece, car}') + +mv $FILE /var/lib/curio-client/data/$COMMP_CID + +miner_actor=$(lotus state list-miners | grep -v t01000) +printf "$COMMP_CID\n" +printf "$PIECE\n" +printf "$CAR\n" + +#if [ "$put" == "true" ]; then +# ################################################################################### +# printf "${ci}sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor \ +# --pcidv2=$COMMP_CID \ +# --contract-address 0xtest --contract-verify-method test --put\n\n${cn}" +# +# sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor --pcidv2=$COMMP_CID --contract-address 0xtest --contract-verify-method test --put +# +#else +# +# if [ "$offline" == "true" ]; then +# +# ################################################################################### +# printf "${ci}sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor \ +# --pcidv2=$COMMP_CID \ +# --contract-address 0xtest --contract-verify-method test\n\n${cn}" +# +# sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor --pcidv2=$COMMP_CID --contract-address 0xtest --contract-verify-method test +# +# else +# ################################################################################### +# printf "${ci}sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor \ +# --http-url=http://piece-server:12320/pieces?id=$COMMP_CID \ +# --pcidv2=$COMMP_CID \ +# --contract-address 0xtest --contract-verify-method test\n\n${cn}" +# +# sptool --actor t01000 toolbox mk20-client deal --provider=$miner_actor --http-url=http://piece-server:12320/pieces?id=$COMMP_CID --pcidv2=$COMMP_CID --contract-address 0xtest --contract-verify-method test +# +# fi +# +#fi diff --git a/documentation/en/SUMMARY.md b/documentation/en/SUMMARY.md index 0ef7ec991..56c4ef47f 100644 --- a/documentation/en/SUMMARY.md +++ b/documentation/en/SUMMARY.md @@ -27,6 +27,10 @@ * [Market UI](curio-market/market-ui.md) * [Retrievals](curio-market/retrievals.md) * [Migrating From Boost](curio-market/migrating-from-boost.md) +* [Market 2.0](market-2.0/README.md) + * [Products](market-2.0/products.md) + * [Market Contracts](market-2.0/contracts.md) + * [API](market-2.0/api.md) * [Snap Deals](snap-deals.md) * [Batch Sealing with SupraSeal](supraseal.md) * [Scaling Curio cluster](scaling-curio-cluster.md) diff --git a/documentation/en/configuration/default-curio-configuration.md b/documentation/en/configuration/default-curio-configuration.md index c2281dc57..e81cdaf5d 100644 --- a/documentation/en/configuration/default-curio-configuration.md +++ b/documentation/en/configuration/default-curio-configuration.md @@ -550,7 +550,7 @@ description: The default curio configuration # Time duration string (e.g., "1h2m3s") in TOML format. (Default: "5m0s") # # type: time.Duration - #IdleTimeout = "2m0s" + #IdleTimeout = "1h0m0s" # ReadHeaderTimeout is amount of time allowed to read request headers # Time duration string (e.g., "1h2m3s") in TOML format. (Default: "5m0s") @@ -701,6 +701,60 @@ description: The default curio configuration # type: bool #DefaultCIDGravityAccept = false + # MK20 encompasses all configuration related to deal protocol mk2.0 i.e. market 2.0 + # + # type: MK20Config + [Market.StorageMarketConfig.MK20] + + # ExpectedPoRepSealDuration is the expected time it would take to seal the deal sector + # This will be used to fail the deals which cannot be sealed on time. + # Time duration string (e.g., "1h2m3s") in TOML format. (Default: "8h0m0s") + # + # type: time.Duration + #ExpectedPoRepSealDuration = "8h0m0s" + + # ExpectedSnapSealDuration is the expected time it would take to snap the deal sector + # This will be used to fail the deals which cannot be sealed on time. + # Time duration string (e.g., "1h2m3s") in TOML format. (Default: "2h0m0s") + # + # type: time.Duration + #ExpectedSnapSealDuration = "2h0m0s" + + # SkipCommP can be used to skip doing a commP check before PublishDealMessage is sent on chain + # Warning: If this check is skipped and there is a commP mismatch, all deals in the + # sector will need to be sent again (Default: false) + # + # type: bool + #SkipCommP = false + + # MaxConcurrentDealSizeGiB is a sum of all size of all deals which are waiting to be added to a sector + # When the cumulative size of all deals in process reaches this number, new deals will be rejected. + # (Default: 0 = unlimited) + # + # type: int64 + #MaxConcurrentDealSizeGiB = 0 + + # DenyUnknownClients determines the default behaviour for the deal of clients which are not in allow/deny list + # If True then all deals coming from unknown clients will be rejected. (Default: false) + # + # type: bool + #DenyUnknownClients = false + + # MaxParallelChunkUploads defines the maximum number of upload operations that can run in parallel. (Default: 512) + # + # type: int + #MaxParallelChunkUploads = 512 + + # MinimumChunkSize defines the smallest size of a chunk allowed for processing, expressed in bytes. Must be a power of 2. (Default: 16 MiB) + # + # type: int64 + #MinimumChunkSize = 16777216 + + # MaximumChunkSize defines the maximum size of a chunk allowed for processing, expressed in bytes. Must be a power of 2. (Default: 256 MiB) + # + # type: int64 + #MaximumChunkSize = 268435456 + # IPNI configuration for ipni-provider # # type: IPNIConfig diff --git a/documentation/en/curio-cli/sptool.md b/documentation/en/curio-cli/sptool.md index c686d4c0c..2cee56b86 100644 --- a/documentation/en/curio-cli/sptool.md +++ b/documentation/en/curio-cli/sptool.md @@ -503,6 +503,7 @@ USAGE: COMMANDS: spark Manage Smart Contract PeerID used by Spark mk12-client mk12 client for Curio + mk20-client mk20 client for Curio help, h Shows a list of commands or help for one command OPTIONS: @@ -888,3 +889,162 @@ USAGE: OPTIONS: --help, -h show help ``` + +### sptool toolbox mk20-client +``` +NAME: + sptool toolbox mk20-client - mk20 client for Curio + +USAGE: + sptool toolbox mk20-client command [command options] + +COMMANDS: + init Initialise curio mk12 client repo + commp + deal Make a mk20 deal with Curio + pdp-deal Make a mk20 PDP deal with Curio + aggregate Create a new aggregate from a list of CAR files + upload Upload a file to the storage provider + chunk-upload Upload a file in chunks to the storage provider + deal-status Get status of a Mk20 deal + help, h Shows a list of commands or help for one command + +OPTIONS: + --mk12-client-repo value repo directory for mk12 client (default: "~/.curio-client") [$CURIO_MK12_CLIENT_REPO] + --help, -h show help +``` + +#### sptool toolbox mk20-client init +``` +NAME: + sptool toolbox mk20-client init - Initialise curio mk12 client repo + +USAGE: + sptool toolbox mk20-client init [command options] + +OPTIONS: + --help, -h show help +``` + +#### sptool toolbox mk20-client commp +``` +NAME: + sptool toolbox mk20-client commp + +USAGE: + sptool toolbox mk20-client commp [command options] + +OPTIONS: + --help, -h show help +``` + +#### sptool toolbox mk20-client deal +``` +NAME: + sptool toolbox mk20-client deal - Make a mk20 deal with Curio + +USAGE: + sptool toolbox mk20-client deal [command options] + +OPTIONS: + --http-url value http url to CAR file + --http-headers value [ --http-headers value ] http headers to be passed with the request (e.g key=value) + --provider value storage provider on-chain address + --pcidv2 value pcidv2 of the CAR file + --duration value duration of the deal in epochs (default: 518400) + --contract-address value contract address of the deal + --contract-verify-method value contract verify method of the deal + --allocation value allocation id of the deal (default: 0) + --indexing indicates that an deal should be indexed (default: true) + --wallet value wallet address to be used to initiate the deal + --announce indicates that deal should be announced to the IPNI(Network Indexer) (default: true) + --aggregate value aggregate file path for the deal + --put used HTTP put as data source (default: false) + --help, -h show help +``` + +#### sptool toolbox mk20-client pdp-deal +``` +NAME: + sptool toolbox mk20-client pdp-deal - Make a mk20 PDP deal with Curio + +USAGE: + sptool toolbox mk20-client pdp-deal [command options] + +OPTIONS: + --http-url value http url to CAR file + --http-headers value [ --http-headers value ] http headers to be passed with the request (e.g key=value) + --provider value PDP providers's URL + --pcidv2 value pcidv2 of the CAR file + --wallet value wallet address to be used to initiate the deal + --aggregate value aggregate file path for the deal + --put used HTTP put as data source (default: false) + --add-piece add piece (default: false) + --add-dataset add dataset (default: false) + --remove-piece remove piece (default: false) + --remove-dataset remove dataset (default: false) + --record-keeper value record keeper address + --piece-id value [ --piece-id value ] root IDs + --dataset-id value dataset IDs (default: 0) + --help, -h show help +``` + +#### sptool toolbox mk20-client aggregate +``` +NAME: + sptool toolbox mk20-client aggregate - Create a new aggregate from a list of CAR files + +USAGE: + sptool toolbox mk20-client aggregate [command options] + +OPTIONS: + --files value [ --files value ] list of CAR files to aggregate + --piece-size value piece size of the aggregate (default: 0) + --out output the aggregate file (default: true) + --help, -h show help +``` + +#### sptool toolbox mk20-client upload +``` +NAME: + sptool toolbox mk20-client upload - Upload a file to the storage provider + +USAGE: + sptool toolbox mk20-client upload [command options] + +OPTIONS: + --provider value PDP providers's URL + --deal value deal id to upload to + --help, -h show help +``` + +#### sptool toolbox mk20-client chunk-upload +``` +NAME: + sptool toolbox mk20-client chunk-upload - Upload a file in chunks to the storage provider + +USAGE: + sptool toolbox mk20-client chunk-upload [command options] + +OPTIONS: + --provider value storage provider on-chain address + --deal value deal id to upload to + --chunk-size value chunk size to be used for the upload (default: "4 MiB") + --wallet value wallet address to be used to initiate the deal + --help, -h show help +``` + +#### sptool toolbox mk20-client deal-status +``` +NAME: + sptool toolbox mk20-client deal-status - Get status of a Mk20 deal + +USAGE: + sptool toolbox mk20-client deal-status [command options] + +OPTIONS: + --provider value PDP providers's URL + --id value deal id + --wallet value wallet address to be used to initiate the deal + --help, -h show help +``` diff --git a/documentation/en/market-2.0/README.md b/documentation/en/market-2.0/README.md new file mode 100644 index 000000000..6da4cd306 --- /dev/null +++ b/documentation/en/market-2.0/README.md @@ -0,0 +1,192 @@ +# Market 2.0 + +This guide introduces the new Filecoin Market 2.0 architecture for clients, developers, and aggregators. It explains how to use the new modular, contract-governed storage market and how to interact with Curio-based storage providers under this new system. + +--- + +## 🧭 Overview + +Filecoin's Market 2.0 removes legacy assumptions of the built-in storage market actor. Instead, deals are processed through **user-defined smart contracts**, allowing: + +* Flexible pricing and service terms +* Support for custom retrieval logic +* Contract-governed deal lifecycle +* Composability via extensible "products" + +Curio's role is purely to onboard data and respect contract terms—it does not mediate pricing, payments, or retrieval policy. + +--- + +## 📡 Supported Endpoints + +### 🔄 POST `/market/mk20/store` + +Accept a new deal (JSON body). + +* Auto-validates structure, products, sources, contract +* If valid, returns `200 OK` +* Otherwise returns appropriate error code (e.g. `400`, `422`, etc) + +### 🧾 GET `/market/mk20/status?id=` + +Check the status of a deal. + +* Returns one of: `accepted`, `processing`, `sealing`, `indexing`, `complete`, or `failed` + +### 🗂 PUT `/market/mk20/data?id=` + +Used only when `source_httpput` is selected. + +* Clients upload raw bytes +* `Content-Length` must match raw size + +### 📜 GET `/market/mk20/contracts` + +Return list of supported contract addresses + +### 🧠 GET `/market/mk20/info` + +Markdown documentation of deal format and validation rules + +### 🧠 GET `/market/mk20/producs` + +Json list of products supported by the provider + +### 🧠 GET `/market/mk20/sources` + +Json list of data sources supported by the provider + +--- + +## 🧑‍💻 Clients + +### 📝 Submitting a Deal + +Clients submit a deal to a Curio node using the `/market/mk20/store` endpoint. A deal includes: + +* A unique ULID identifier +* A `DataSource` (e.g. HTTP, offline, PUT) +* One or more `Products` (like `ddov1`) that define how the deal should behave + +#### Example Products: + +* `ddov1`: governs how the data should be stored and verified +* (future) `aclv1`: may define retrieval access controls +* (future) `retrievalv1`: may define retrieval SLA or payment terms + +### 🛠 Smart Contract Control + +Clients must select a contract that: + +* Is supported by the SP +* Implements a deal validation method that returns a valid DealID + +Clients pass the contract address, method name, and encoded params. + +### 🔁 Deal Lifecycle + +The contract governs whether the deal is valid. If valid: + +* The SP accepts and starts onboarding the data +* The deal may be indexed and/or announced to IPNI, based on deal config +* Data may be retrieved later via PieceCIDv2, PayloadCID, or subpiece CID + +--- + +## 🧱 Developers + +### 🧩 Building New Products + +Each product is a self-contained struct in the deal payload. Developers can: + +* Define new product types (e.g., `aclv1`, `retrievalv1`, `auditv1`) +* Implement validation logic on the SP side +* Optionally affect indexing, retrievals, ACLs, or other lifecycle aspects + +This makes the deal structure extensible **without requiring protocol or DB changes.** + +### 🧠 Writing Market Contracts + +A contract must: + +* Be added to the SP's whitelist +* Implement a method (e.g. `verifyDeal`) that takes a single `bytes` parameter +* Return a valid deal ID if the deal is accepted + +Contracts can implement features like: + +* Off-chain or on-chain ACL logic +* Multi-party deal approval +* FIL+ verification +* SLA enforcement + +--- + +## 🔁 Aggregators + +Data aggregators in Market 2.0 should: + +* No longer implement protocol-level workarounds (like ACLs or approvals) +* Provide value-added services like dashboards, alerts, analytics, SDKs +* Optionally act as data sources for multi-client deal generation + +Market 2.0 aims to reduce dependency on aggregators by letting providers and contracts do the heavy lifting. + +--- + +## 📦 Retrievals + +Curio supports the following retrieval inputs: + +* **PieceCIDv2**: required for all piece-level retrievals +* **PayloadCID**: if indexing is enabled +* **Subpiece CID**: if the deal was aggregated and subpieces were indexed + +ACL-based gating is not yet implemented, but future products can enable it. + +--- + +## ♻️ Deal Lifecycle in Curio + +1. **Client submits** deal with products, data, and contract call info +2. **Curio validates** all inputs and uses the contract to get a DealID +3. **Data is onboarded** via HTTP, offline import, PUT, or aggregation +4. **Products control** indexing, IPNI, and future extensibility +5. **Data is removed** from disk and DB when the sector expires + +--- + +## 🧪 Current Product: `ddov1` + +This product represents the first non-native Filecoin market product. It includes: + +* Provider, client, and piece manager addresses +* Optional AllocationID (or minimum duration) +* Contract + verification method for DealID +* Indexing and IPNI flags +* Notification hooks for deal lifecycle events + +More details can be found in the product schema or SP guide. + +--- + +## 🔮 Future Directions + +* ACL enforcement via `aclv1` +* Retrieval policy enforcement via `retrievalv1` +* Sealed sector access / download pricing +* Smart contract SLAs and renewals +* Market UIs and dashboards + +--- + +## ✅ Summary + +Market 2.0 enables: + +* Composable, contract-governed storage deals +* Modular product design +* Client-friendly HTTP-first onboarding +* Decoupled market innovation from SP software +* Stronger integration paths for aggregators and external tools + diff --git a/documentation/en/market-2.0/api.md b/documentation/en/market-2.0/api.md new file mode 100644 index 000000000..5e23cbff5 --- /dev/null +++ b/documentation/en/market-2.0/api.md @@ -0,0 +1,209 @@ +# MK20 API Reference + +This document describes the HTTP endpoints supported by the Market 2.0 module in Curio. These endpoints are used by clients and external systems to submit storage deals, upload data, track status, and fetch provider configuration. + +--- + +## 🌐 Base Path + +All endpoints are exposed under: + +``` +/market/mk20 +``` + +--- + +## 🔄 POST `/store` + +Submit a new MK20 deal to the storage provider. + +* **Content-Type**: `application/json` +* **Body**: JSON-encoded Deal +* **Query Parameters**: None + +### ✅ Response + +* `200 OK` – Deal accepted successfully +* `400 Bad Request` – Malformed JSON or missing required fields +* `422 Unprocessable Entity` – Unsupported product or data source +* `426` – Deal rejected by contract +* `430` – Malformed data source +* `441` – Deal duration too short + +### 🧪 Example + +```http +POST /market/mk20/store +Content-Type: application/json + +{ + "identifier": "01H9Y...", + "data": { ... }, + "products": { ... } +} +``` + +--- + +## 🧾 GET `/status` + +Retrieve the current processing status of a deal. + +* **Query Parameters**: + + * `id`: ULID of the deal + +### ✅ Response + +* `200 OK`: Returns JSON-encoded status +* `400 Bad Request`: Missing or malformed `id` +* `404 Not Found`: No such deal +* `500 Internal Server Error`: Unexpected backend error + +### 📄 Response Schema + +```json +{ + "status": "accepted" | "processing" | "sealing" | "indexing" | "complete" | "failed", + "error_msg": "string (optional)" +} +``` + +--- + +## 🗂 PUT `/data` + +Upload raw deal data for deals that declared a `source_httpput` source. + +* **Headers**: + + * `Content-Type: application/octet-stream` + * `Content-Length`: must match declared raw size +* **Query Parameters**: + + * `id`: ULID of the deal +* **Body**: Raw byte stream + +### ✅ Response + +* `200 OK`: Data accepted +* `400 Bad Request`: Invalid/missing content headers +* `413 Payload Too Large`: Content exceeds allowed size +* `415 Unsupported Media Type`: Incorrect content type + +--- + +## 📜 GET `/contracts` + +Return a list of smart contract addresses currently whitelisted by the provider. + +### ✅ Response + +```json +{ + "contracts": [ + "0x123...", + "0xabc..." + ] +} +``` + +* `200 OK`: List of contracts +* `500 Internal Server Error`: Failure fetching contract list + +--- + +## 🧠 GET `/info` + +Returns markdown-formatted documentation describing: + +* Supported deal structure +* Data source formats +* Product extensions + +### ✅ Response + +* `200 OK`: Markdown string +* `500 Internal Server Error`: If the info file cannot be generated + +--- + +### 🧰 GET `/products` + +Fetch a JSON list of supported deal products enabled on this provider. + +- **Content-Type**: N/A +- **Body**: N/A +- **Query Parameters**: N/A + +#### ✅ Response +- `200 OK`: JSON array of enabled products +- `500 Internal Server Error`: If the list cannot be fetched + +#### 🧪 Example Response +```json +{ + "products": [ + "ddo_v1", + "aclv1" + ] +} +``` + +--- + +### 🌐 GET `/sources` + +Fetch a JSON list of supported data source types enabled on this provider. + +- **Content-Type**: N/A +- **Body**: N/A +- **Query Parameters**: N/A + +#### ✅ Response +- `200 OK`: JSON array of enabled data sources +- `500 Internal Server Error`: If the list cannot be fetched + +#### 🧪 Example Response +```json +{ + "sources": [ + "http", + "offline", + "put", + "aggregate" + ] +} +``` + +--- + +## 📑 Error Code Summary + +| Code | Meaning | +| ---- | ---------------------------------- | +| 200 | Success | +| 400 | Bad proposal or malformed JSON | +| 422 | Unsupported product or data source | +| 426 | Deal rejected by contract | +| 430 | Malformed data source | +| 441 | Duration too short | +| 500 | Internal server error | + +--- + +## 🧩 Status Code Values (from `/status`) + +| Value | Meaning | +| ------------ | ----------------------------------------------- | +| `accepted` | Deal was accepted and is waiting for processing | +| `processing` | Deal is being staged or fetched | +| `sealing` | Deal is being sealed into a sector | +| `indexing` | Deal is being indexed for CID-based retrievals | +| `complete` | Deal has been sealed and is finalized | +| `failed` | Deal failed at any point in the pipeline | + +--- + +For full type schemas, see the `/info` endpoint or consult the documentation. diff --git a/documentation/en/market-2.0/contracts.md b/documentation/en/market-2.0/contracts.md new file mode 100644 index 000000000..757ec2364 --- /dev/null +++ b/documentation/en/market-2.0/contracts.md @@ -0,0 +1,119 @@ +# Smart Contract Integration Guide + +This guide explains how to write, deploy, and integrate a smart contract that governs storage deals in the Market 2.0 architecture. Contracts are responsible for determining whether a deal is valid and returning a DealID. + +--- + +## 🎯 Purpose of the Contract + +In Market 2.0, contracts are used to: + +* Accept or reject deals +* Optionally implement additional business logic (e.g. FIL+ validation, payments, approvals) +* Return a DealID string if accepted + +The contract does **not** manage storage or retrieval itself—that is handled by the SP. + +--- + +## ✅ Requirements + +A valid Market 2.0 contract must: + +1. Be deployed on a supported chain (e.g. Filecoin EVM, Hyperspace, etc) +2. Be whitelisted by the storage provider (via UI or admin tool) +3. Have its ABI uploaded +4. Expose a method that: + + * Accepts a single `bytes` input + * Returns a string (representing the DealID) + +--- + +## 🔁 Flow + +1. Client encodes parameters for your method +2. Client submits deal to Curio with: + + * Contract address + * Method name + * ABI-encoded parameters +3. Curio: + + * Loads ABI + * Packs the method call + * Calls `eth_call` + * Unpacks the return value + +If the method returns a string → deal is accepted. If empty string or call fails → deal is rejected. + +--- + +## 🧪 Example Contract Method + +```solidity +function verifyDeal(bytes calldata params) external view returns (string memory) { + // decode params into your structure + // perform validation + // return deal ID if valid + return "deal-123"; +} +``` + +--- + +## 📜 ABI Upload + +The SP must upload the ABI JSON for your contract when whitelisting it: + +* This enables Curio to find and call the method +* ABI must include the method name, inputs, and return types + +--- + +## 🔐 Client Responsibilities + +Clients must: + +* Choose a contract accepted by the SP +* Encode call parameters into `[]byte` +* Provide method name and contract address in the deal + +--- + +## 🧩 Products and Contract Use + +Contracts are typically used from within a **product** (e.g. `ddov1`). The product defines: + +* Contract address +* Method name +* Encoded params (using ABI rules) + +This decouples contract logic from storage logic and keeps deals composable. + +--- + +## 🚫 Common Errors + +| Error | Cause | +| ------------------------------- | ---------------------------------------------- | +| `426 Deal rejected by contract` | Returned string is empty or `eth_call` fails | +| `ABI not found` | Contract not whitelisted or ABI missing | +| `Invalid method` | Method name not found in ABI | +| `Incorrect input format` | Method doesn’t accept single `bytes` parameter | + +--- + +## ✅ Checklist for Integration + +* [ ] Deploy contract on supported chain +* [ ] Expose a `function(bytes) returns (string)` method +* [ ] Whitelist contract via SP UI +* [ ] Upload ABI including the method +* [ ] Coordinate with clients on method + param encoding + +--- + +This guide enables market developers to plug in custom contract logic without requiring any changes to Curio or the storage pipeline. + +Welcome to programmable storage governance. diff --git a/documentation/en/market-2.0/products.md b/documentation/en/market-2.0/products.md new file mode 100644 index 000000000..00f277250 --- /dev/null +++ b/documentation/en/market-2.0/products.md @@ -0,0 +1,246 @@ +# Products & Extensibility Guide + +Market 2.0 introduces a fully extensible framework for storage deal configuration. This guide explains how products work, how new ones can be added, and how developers and providers can safely evolve without changing core Curio logic. + +--- + +## 🧩 What Is a Product? + +A **product** is a named section of a deal that adds optional logic or configuration. Each product defines one or more aspects of the deal lifecycle. + +Examples: + +* `ddov1` – controls how data is onboarded and what contract governs it +* `aclv1` *(future)* – may define access control permissions +* `retrievalv1` *(future)* – may define retrieval conditions or SLA pricing + +Each product is a top-level field in the `products` object in a deal: + +```json +"products": { + "ddo_v1": { ... }, + "aclv1": { ... } +} +``` + +--- + +## 🛠 Product Responsibilities + +A product may: + +* Validate a deal before acceptance +* Provide smart contract call details +* Affect retrieval behavior (e.g. IPNI, ACLs) +* Receive notifications (e.g. on sector sealing) + +A product **must not**: + +* Trigger storage actions directly (Curio handles onboarding) +* Conflict with other products +* Depend on runtime configuration (products are static per deal) + +--- + +## 📐 Product Structure + +All products are Go structs that implement the following interface-like behavior: + +* A `.Validate(*DB, *Config) (ErrorCode, error)` method +* Optional `.GetDealID()` logic if a contract call is needed +* Unique product name (`ProductNameDDOV1`, etc) + +Products are stored in JSON under the `products` field. + +--- + +## 🧪 Example: `ddov1` + +The `ddov1` product includes: + +* Provider, client, and piece manager addresses +* Duration or allocation ID +* Smart contract call details: address, method, params +* Flags for indexing and IPNI +* Optional notification hooks + +Curio uses these fields to validate the deal, determine storage lifecycle, and optionally announce to IPNI. + +--- + +## 🛡 ACLs and Retrieval Products (Future) + +Market 2.0 was designed to support retrieval-layer enforcement through: + +* ACL products (e.g., define who can retrieve what, when) +* Retrieval policy products (e.g., define pricing, terms) + +These will live alongside onboarding products like `ddov1`. + +--- + +## ✅ Design Philosophy + +* Each product handles one concern +* Multiple products can be included in one deal +* Future products won't require code changes to existing ones +* Extensibility is done via composition, not inheritance + +--- + +## 📦 Summary + +| Concept | Description | +| ------------ | -------------------------------------------------------- | +| Product | Modular block in a deal defining optional behavior | +| Validation | Each product validates its own logic | +| Contract | Products may define how to obtain deal ID | +| Future-proof | New products can be added without DB or protocol changes | + +Products are the core of Market 2.0's flexibility—allowing new ideas to be layered in without disrupting existing workflows. + +# Write Your Own Product – Developer Guide + +This guide walks developers through creating a custom **product** for Market 2.0. Products add modular capabilities to deals—ranging from storage control to retrieval logic, ACLs, SLAs, and beyond. + +--- + +Each product must: + +* Implement validation +* Optionally provide contract call instructions (if needed) +* Return its canonical product name + +--- + +## 🧱 Structure of a Product + +Each product is a Go struct with a few key methods: + +```go +type MyProduct struct { + SomeField string `json:"some_field"` + // More fields... +} + +func (p *MyProduct) Validate(db *harmonydb.DB, cfg *config.MK20Config) (ErrorCode, error) { + // Check for required fields + // Enforce constraints + return Ok, nil +} + +func (p *MyProduct) ProductName() ProductName { + return "myproductv1" +} +``` + +--- + +## 🛠 Adding a New Product (Step-by-Step) + +### 1. Define Struct in `types.go` + +Add a new `MyProduct` struct to the `Products` block: + +```go +type Products struct { + DDOV1 *DDOV1 `json:"ddo_v1"` + MyProduct *MyProduct `json:"myproduct_v1"` +} +``` + +### 2. Implement `.Validate()` + +Use `Validate()` to define how the product ensures the deal is valid. +You may: + +* Check required fields +* Enforce logic (e.g. if X is true, Y must also be set) +* Query DB if needed + +Return `ErrorCode` and reason for failure. + +### 3. Optionally: Contract Integration + +If your product relies on a contract, implement: + +```go +func (p *MyProduct) GetDealID(...) (string, ErrorCode, error) +``` + +This is how `ddov1` fetches DealID via contract call. + +### 4. Add to JSON Marshal/Unmarshal + +Nothing needed—`Products` already uses JSON tags. +Curio stores each product as a JSON field under `products` in DB. + +### 5. Update UI Toggle Support (Optional) + +Add a toggle entry in the admin panel: + +* `market_mk20_products` table +* Use your product name as key (`myproduct_v1`) +* Enable or disable per deployment + +### 6. Document via `/info` + +Update the markdown generator so your product shows up in `/market/mk20/info`. + +--- + +## 🧪 Example Use Case: Retrieval Policy + +You might want to create `retrievalv1` with: + +```go +type RetrievalV1 struct { + PayPerByte bool `json:"pay_per_byte"` + MaxBandwidth int `json:"max_bandwidth_kbps"` + AllowedIPs []string `json:"allowed_ips"` +} +``` + +And enforce in `.Validate()`: + +```go +if p.PayPerByte && p.MaxBandwidth == 0 { + return ErrProductValidationFailed, xerrors.Errorf("bandwidth limit required for paid retrieval") +} +``` + +Later, your retrieval service can look up this product and apply pricing. + +--- + +## ✅ Guidelines + +| Rule | Description | +| ------------------ | ----------------------------------------- | +| ✅ Modular | Product should only affect its own logic | +| ✅ Optional | Products are opt-in per deal | +| ✅ Composable | Multiple products can exist in one deal | +| ❌ No Runtime State | Product logic is static and stateless | +| ❌ No Storage Logic | Curio handles onboarding, not the product | + +--- + +## 🔄 Deployment Considerations + +* Curio does not require a restart to recognize new products +* Products not enabled in DB will be rejected during validation +* Ensure all field names are `snake_case` in JSON + +--- + +## 📦 Summary + +Products are the extension mechanism of Market 2.0: + +* Validated independently +* Optional per deal +* Zero-conflict by design +* Fully extensible without schema or protocol changes + +Use them to inject new behaviors into Curio without touching the base system. + diff --git a/go.mod b/go.mod index 0f769f639..f63f277a6 100644 --- a/go.mod +++ b/go.mod @@ -25,8 +25,9 @@ require ( github.com/filecoin-project/go-commp-utils v0.1.4 github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20240802040721-2a04ffc8ffe8 github.com/filecoin-project/go-commp-utils/v2 v2.1.0 + github.com/filecoin-project/go-data-segment v0.0.1 github.com/filecoin-project/go-f3 v0.8.10 - github.com/filecoin-project/go-fil-commcid v0.2.0 + github.com/filecoin-project/go-fil-commcid v0.3.1 github.com/filecoin-project/go-fil-commp-hashhash v0.2.0 github.com/filecoin-project/go-jsonrpc v0.8.0 github.com/filecoin-project/go-padreader v0.0.1 @@ -52,7 +53,7 @@ require ( github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 + github.com/icza/backscanner v0.0.0-20240328210400-b40c3a86dec5 github.com/invopop/jsonschema v0.12.0 github.com/ipfs/boxo v0.33.0 github.com/ipfs/go-block-format v0.2.2 @@ -68,7 +69,7 @@ require ( github.com/ipld/go-car v0.6.2 github.com/ipld/go-car/v2 v2.15.0 github.com/ipld/go-ipld-prime v0.21.0 - github.com/ipni/go-libipni v0.6.13 + github.com/ipni/go-libipni v0.6.19 github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 github.com/jellydator/ttlcache/v2 v2.11.1 github.com/kelseyhightower/envconfig v1.4.0 @@ -78,12 +79,14 @@ require ( github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/minio/sha256-simd v1.0.1 github.com/mitchellh/go-homedir v1.1.0 + github.com/mr-tron/base58 v1.2.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.16.1 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.2 github.com/multiformats/go-multihash v0.2.3 github.com/multiformats/go-varint v0.0.7 + github.com/oklog/ulid v1.3.1 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.22.0 @@ -94,6 +97,8 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/snadrus/must v0.0.0-20240605044437-98cedd57f8eb github.com/stretchr/testify v1.10.0 + github.com/swaggo/http-swagger/v2 v2.0.2 + github.com/swaggo/swag v1.16.4 github.com/triplewz/poseidon v0.0.2 github.com/urfave/cli/v2 v2.27.5 github.com/whyrusleeping/cbor-gen v0.3.2-0.20250409092040-76796969edea @@ -119,13 +124,11 @@ require ( github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee // indirect github.com/Jorropo/jsync v1.0.1 // indirect github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa // indirect + github.com/KyleBanks/depth v1.2.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/StackExchange/wmi v1.2.1 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect - github.com/akavel/rsrc v0.8.0 // indirect + github.com/akavel/rsrc v0.10.2 // indirect github.com/andybalholm/brotli v1.1.0 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect @@ -166,7 +169,7 @@ require ( github.com/ethereum/go-verkle v0.2.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect - github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/go-amt-ipld/v3 v3.1.1 // indirect github.com/filecoin-project/go-amt-ipld/v4 v4.4.0 // indirect github.com/filecoin-project/go-clock v0.1.0 // indirect github.com/filecoin-project/go-crypto v0.1.0 // indirect @@ -193,15 +196,15 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.19.3 // indirect - github.com/go-openapi/jsonreference v0.19.4 // indirect - github.com/go-openapi/spec v0.19.11 // indirect - github.com/go-openapi/swag v0.19.11 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/spec v0.20.6 // indirect + github.com/go-openapi/swag v0.19.15 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.4 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/gopacket v1.1.19 // indirect @@ -243,7 +246,7 @@ require ( github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/jessevdk/go-flags v1.4.0 // indirect + github.com/jessevdk/go-flags v1.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/kilic/bls12-381 v0.1.1-0.20220929213557-ca162e8a70f4 // indirect @@ -255,7 +258,7 @@ require ( github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-kad-dht v0.33.1 // indirect github.com/libp2p/go-libp2p-kbucket v0.7.0 // indirect - github.com/libp2p/go-libp2p-pubsub v0.13.0 // indirect + github.com/libp2p/go-libp2p-pubsub v0.14.1 // indirect github.com/libp2p/go-libp2p-record v0.3.1 // indirect github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect github.com/libp2p/go-maddr-filter v0.1.0 // indirect @@ -264,7 +267,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect - github.com/magefile/mage v1.9.0 // indirect + github.com/magefile/mage v1.15.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.14 // indirect @@ -276,7 +279,6 @@ require ( github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect github.com/muesli/reflow v0.3.0 // indirect github.com/muesli/termenv v0.15.2 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect @@ -285,7 +287,7 @@ require ( github.com/multiformats/go-multistream v0.6.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nikkolasg/hexjson v0.1.0 // indirect - github.com/nkovacs/streamquote v1.0.0 // indirect + github.com/nkovacs/streamquote v1.1.0 // indirect github.com/onsi/ginkgo/v2 v2.23.4 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect @@ -322,15 +324,16 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/supranational/blst v0.3.13 // indirect + github.com/swaggo/files/v2 v2.0.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect github.com/twmb/murmur3 v1.1.6 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/valyala/fasttemplate v1.0.1 // indirect + github.com/valyala/fasttemplate v1.2.2 // indirect github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect @@ -339,6 +342,7 @@ require ( github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/wlynxg/anet v0.0.5 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-filecoin-go v1.1.0 // indirect diff --git a/go.sum b/go.sum index d696b2ce3..4fed7203f 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,8 @@ github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivyb github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa/go.mod h1:WUmMvh9wMtqj1Xhf1hf3kp9RvL+y6odtdYxpyZjb90U= +github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= @@ -69,20 +71,17 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= -github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/akavel/rsrc v0.10.2 h1:Zxm8V5eI1hW4gGaYsJQUhxpjkENuG91ki8B4zCrvEsw= +github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 h1:T3+cD5fYvuH36h7EZq+TDpm+d8a6FSD4pQsbmuGGQ8o= github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921/go.mod h1:/n6+1/DWPltRLWL/VKyUxg6tzsl5kHUCcraimt4vr60= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -214,6 +213,7 @@ github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOV github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4= github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/daaku/go.zipexe v1.0.2 h1:Zg55YLYTr7M9wjKn8SY/WcpuuEi+kR2u4E8RhvpyXmk= @@ -293,8 +293,9 @@ github.com/filecoin-project/go-address v1.2.0/go.mod h1:kQEQ4qZ99a51X7DjT9HiMT4y github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= -github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.1 h1:n+nczYe6VedXmdtAXygRuey246YnYyuY1NPrmy2iK6s= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.1/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= github.com/filecoin-project/go-amt-ipld/v4 v4.0.0/go.mod h1:gF053YQ4BIpzTNDoEwHZas7U3oAwncDVGvOHyY8oDpE= github.com/filecoin-project/go-amt-ipld/v4 v4.4.0 h1:6kvvMeSpIy4GTU5t3vPHZgWYIMRzGRKLJ73s/cltsoc= github.com/filecoin-project/go-amt-ipld/v4 v4.4.0/go.mod h1:msgmUxTyRBZ6iXt+5dnUDnIb7SEFqdPsbB1wyo/G3ts= @@ -316,14 +317,16 @@ github.com/filecoin-project/go-commp-utils/v2 v2.1.0/go.mod h1:NbxJYlhxtWaNhlVCj github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-crypto v0.1.0 h1:Pob2MphoipMbe/ksxZOMcQvmBHAd3sI/WEqcbpIsGI0= github.com/filecoin-project/go-crypto v0.1.0/go.mod h1:K9UFXvvoyAVvB+0Le7oGlKiT9mgA5FHOJdYQXEE8IhI= +github.com/filecoin-project/go-data-segment v0.0.1 h1:1wmDxOG4ubWQm3ZC1XI5nCon5qgSq7Ra3Rb6Dbu10Gs= +github.com/filecoin-project/go-data-segment v0.0.1/go.mod h1:H0/NKbsRxmRFBcLibmABv+yFNHdmtl5AyplYLnb0Zv4= github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7 h1:v+zJS5B6pA3ptWZS4t8tbt1Hz9qENnN4nVr1w99aSWc= github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7/go.mod h1:V3Y4KbttaCwyg1gwkP7iai8CbQx4mZUGjd3h9GZWLKE= github.com/filecoin-project/go-ds-versioning v0.1.2 h1:to4pTadv3IeV1wvgbCbN6Vqd+fu+7tveXgv/rCEZy6w= github.com/filecoin-project/go-ds-versioning v0.1.2/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= github.com/filecoin-project/go-f3 v0.8.10 h1:Mm+daAn9EKqTTDY3ICbPTR2i3Opjb4gr6Y7bJ8oCA84= github.com/filecoin-project/go-f3 v0.8.10/go.mod h1:hFvb2CMxHDmlJAVzfiIL/V8zCtNMQqfSnhP5TyM6CHI= -github.com/filecoin-project/go-fil-commcid v0.2.0 h1:B+5UX8XGgdg/XsdUpST4pEBviKkFOw+Fvl2bLhSKGpI= -github.com/filecoin-project/go-fil-commcid v0.2.0/go.mod h1:8yigf3JDIil+/WpqR5zoKyP0jBPCOGtEqq/K1CcMy9Q= +github.com/filecoin-project/go-fil-commcid v0.3.1 h1:4EfxpHSlvtkOqa9weG2Yt5kxFmPib2xU7Uc9Lbqk7fs= +github.com/filecoin-project/go-fil-commcid v0.3.1/go.mod h1:z7Ssf8d7kspF9QRAVHDbZ+43JK4mkhbGH5lyph1TnKY= github.com/filecoin-project/go-fil-commp-hashhash v0.2.0 h1:HYIUugzjq78YvV3vC6rL95+SfC/aSTVSnZSZiDV5pCk= github.com/filecoin-project/go-fil-commp-hashhash v0.2.0/go.mod h1:VH3fAFOru4yyWar4626IoS5+VGE8SfZiBODJLUigEo4= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= @@ -389,8 +392,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/gammazero/channelqueue v0.2.2 h1:ufNzIbeDBxNfHj0m5uwUfOwvTmHF/O40hu2ZNnvF+/8= -github.com/gammazero/channelqueue v0.2.2/go.mod h1:824o5HHE+yO1xokh36BIuSv8YWwXW0364ku91eRMFS4= github.com/gammazero/chanqueue v1.1.0 h1:yiwtloc1azhgGLFo2gMloJtQvkYD936Ai7tBfa+rYJw= github.com/gammazero/chanqueue v1.1.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34= @@ -433,24 +434,28 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.4 h1:3Vw+rh13uq2JFNxgnMTGE1rnoieU9FmyE1gvnyylsYg= github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.11 h1:ogU5q8dtp3MMPn59a9VRrPKVxvJHEs5P7yNMR5sNnis= github.com/go-openapi/spec v0.19.11/go.mod h1:vqK/dIdLGCosfvYsQV3WfC7N3TiZSnGY2RZKoFK7X28= +github.com/go-openapi/spec v0.20.6 h1:ich1RQ3WDbfoeTqTAb+5EIxNmpKVJZWBNah9RAT0jIQ= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.8/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.11 h1:RFTu/dlFySpyVvJDfp/7674JY4SDglYWKztbiIGFpmc= github.com/go-openapi/swag v0.19.11/go.mod h1:Uc0gKkdR+ojzsEpjh39QChyu92vPgIr72POcgHMAgSY= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -474,8 +479,9 @@ github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwm github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -627,8 +633,8 @@ github.com/iancoleman/orderedmap v0.1.0 h1:2orAxZBJsvimgEBmMWfXaFlzSG2fbQil5qzP3 github.com/iancoleman/orderedmap v0.1.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= -github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 h1:9tcYMdi+7Rb1y0E9Del1DRHui7Ne3za5lLw6CjMJv/M= -github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94/go.mod h1:GYeBD1CF7AqnKZK+UCytLcY3G+UKo0ByXX/3xfdNyqQ= +github.com/icza/backscanner v0.0.0-20240328210400-b40c3a86dec5 h1:FcxwOojw6pUiPpsf7Q6Fw/pI+7cR6FlapLBEGV/902A= +github.com/icza/backscanner v0.0.0-20240328210400-b40c3a86dec5/go.mod h1:GYeBD1CF7AqnKZK+UCytLcY3G+UKo0ByXX/3xfdNyqQ= github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6 h1:8UsGZ2rr2ksmEru6lToqnXgA8Mz1DP11X4zSJ159C3k= github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6/go.mod h1:xQig96I1VNBDIWGCdTt54nHt6EeI639SmHycLYL7FkA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -782,8 +788,8 @@ github.com/ipld/go-trustless-utils v0.4.1 h1:puA14381Hg2LzH724mZ5ZFKFx+FFjjT5fPF github.com/ipld/go-trustless-utils v0.4.1/go.mod h1:DgGuyfJ33goYwYVisjnxrlra0HVmZuHWVisVIkzVo1o= github.com/ipld/ipld/specs v0.0.0-20231012031213-54d3b21deda4 h1:0VXv637/xpI0Pb5J8K+K8iRtTw4DOcxs0MB1HMzfwNY= github.com/ipld/ipld/specs v0.0.0-20231012031213-54d3b21deda4/go.mod h1:WcT0DfRe+e2QFY0kcbsOnuT6jL5Q0JNZ83I5DHIdStg= -github.com/ipni/go-libipni v0.6.13 h1:6fQU6ZFu8fi0DZIs4VXZrIFbT9r97dNmNl7flWMVblE= -github.com/ipni/go-libipni v0.6.13/go.mod h1:+hNohg7Tx8ML2a/Ei19zUxCnSqtqXiHySlqHIwPhQyQ= +github.com/ipni/go-libipni v0.6.19 h1:f19SYd585pqzX5C6M8vFP1veL7fVYuBELIFfsMjOMZQ= +github.com/ipni/go-libipni v0.6.19/go.mod h1:pu+1iqsmN6TE2JcHjEKnDkLkONy1PW0rM4qjeF3jnHM= github.com/ipni/index-provider v0.15.4 h1:K64q94r6M/QFyIvRwMxC6oOv92cOixCzy/awGmsBEXI= github.com/ipni/index-provider v0.15.4/go.mod h1:R08LoUrA12fiqtDVUwLAv+g09BPY0FsCG58JvFEyVzo= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= @@ -817,8 +823,9 @@ github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0 github.com/jellydator/ttlcache/v2 v2.11.1 h1:AZGME43Eh2Vv3giG6GeqeLeFXxwxn1/qHItqWZl6U64= github.com/jellydator/ttlcache/v2 v2.11.1/go.mod h1:RtE5Snf0/57e+2cLWFYWCCsLas2Hy3c5Z4n14XmSvTI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= @@ -913,8 +920,8 @@ github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCv github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= -github.com/libp2p/go-libp2p-pubsub v0.13.0 h1:RmFQ2XAy3zQtbt2iNPy7Tt0/3fwTnHpCQSSnmGnt1Ps= -github.com/libp2p/go-libp2p-pubsub v0.13.0/go.mod h1:m0gpUOyrXKXdE7c8FNQ9/HLfWbxaEw7xku45w+PaqZo= +github.com/libp2p/go-libp2p-pubsub v0.14.1 h1:XK/rPKZKhPvRrtsjvfwrOZPnQQbGLmaEg7u6qnJfn8U= +github.com/libp2p/go-libp2p-pubsub v0.14.1/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= @@ -959,8 +966,9 @@ github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= +github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magik6k/reflink v1.0.2-patch1 h1:NXSgQugcESI8Z/jBtuAI83YsZuRauY9i9WOyOnJ7Vns= github.com/magik6k/reflink v1.0.2-patch1/go.mod h1:WGkTOKNjd1FsJKBw3mu4JvrPEDJyJJ+JPtxBkbPoCok= @@ -1090,12 +1098,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nikkolasg/hexjson v0.1.0 h1:Cgi1MSZVQFoJKYeRpBNEcdF3LB+Zo4fYKsDz7h8uJYQ= github.com/nikkolasg/hexjson v0.1.0/go.mod h1:fbGbWFZ0FmJMFbpCMtJpwb0tudVxSSZ+Es2TsCg57cA= -github.com/nkovacs/streamquote v1.0.0 h1:PmVIV08Zlx2lZK5fFZlMZ04eHcDTIFJCv/5/0twVUow= github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= +github.com/nkovacs/streamquote v1.1.0 h1:wDY1+Hikdx4iOmZZBFLXvwLr7zj9uPIoXfijz+6ad2g= +github.com/nkovacs/streamquote v1.1.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1258,8 +1270,8 @@ github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQ github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= -github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -1338,6 +1350,12 @@ github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk= github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/swaggo/files/v2 v2.0.0 h1:hmAt8Dkynw7Ssz46F6pn8ok6YmGZqHSVLZ+HQM7i0kw= +github.com/swaggo/files/v2 v2.0.0/go.mod h1:24kk2Y9NYEJ5lHuCra6iVwkMjIekMCaFq/0JQj66kyM= +github.com/swaggo/http-swagger/v2 v2.0.2 h1:FKCdLsl+sFCx60KFsyM0rDarwiUSZ8DqbfSyIKC9OBg= +github.com/swaggo/http-swagger/v2 v2.0.2/go.mod h1:r7/GBkAWIfK6E/OLnE8fXnviHiDeAHmgIyooa4xm3AQ= +github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A= +github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -1349,10 +1367,10 @@ github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/triplewz/poseidon v0.0.2 h1:s5QMVYnUfqvgM1eIqp7O9hHjZLVrKnkhx0E7EQTf9Nk= github.com/triplewz/poseidon v0.0.2/go.mod h1:fmoxtMcbtMUjlSJmpuS3Wk/oKSvdJpIp9YWRbsOu3T0= github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= @@ -1366,8 +1384,9 @@ github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/gozstd v1.20.1 h1:xPnnnvjmaDDitMFfDxmQ4vpx0+3CdTg2o3lALvXTU/g= github.com/valyala/gozstd v1.20.1/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= @@ -1426,6 +1445,8 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= @@ -1728,6 +1749,7 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1950,6 +1972,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= @@ -1971,6 +1994,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= diff --git a/harmony/harmonydb/sql/20240228-piece-park.sql b/harmony/harmonydb/sql/20240228-piece-park.sql index b4fbfeffa..e74cc0634 100644 --- a/harmony/harmonydb/sql/20240228-piece-park.sql +++ b/harmony/harmonydb/sql/20240228-piece-park.sql @@ -13,6 +13,8 @@ create table parked_pieces ( -- long_term boolean not null default false, -- Added in 20240930-pdp.sql + -- skip boolean not null default false, -- Added in 20250505-market-mk20.sql to allow skipping download + -- NOTE: Following keys were dropped in 20240507-sdr-pipeline-fk-drop.sql foreign key (task_id) references harmony_task (id) on delete set null, -- dropped foreign key (cleanup_task_id) references harmony_task (id) on delete set null, -- dropped diff --git a/harmony/harmonydb/sql/20240731-market-migration.sql b/harmony/harmonydb/sql/20240731-market-migration.sql index 9ee7a71ed..41044242c 100644 --- a/harmony/harmonydb/sql/20240731-market-migration.sql +++ b/harmony/harmonydb/sql/20240731-market-migration.sql @@ -25,6 +25,7 @@ CREATE TABLE market_mk12_deals ( piece_cid TEXT NOT NULL, piece_size BIGINT NOT NULL, + -- raw_size BIGINT (Added in 20250505-market-mk20.sql) fast_retrieval BOOLEAN NOT NULL, announce_to_ipni BOOLEAN NOT NULL, @@ -54,6 +55,8 @@ CREATE TABLE market_piece_metadata ( indexed BOOLEAN NOT NULL DEFAULT FALSE, indexed_at TIMESTAMPTZ NOT NULL DEFAULT TIMEZONE('UTC', NOW()), + -- dropped in 20250505-market-mk20.sql + -- PRIMARY KEY (piece_cid, piece_size) (Added in 20250505-market-mk20.sql) constraint market_piece_meta_identity_key unique (piece_cid, piece_size) ); @@ -64,7 +67,6 @@ CREATE TABLE market_piece_metadata ( -- Cleanup for this table will be created in a later stage. CREATE TABLE market_piece_deal ( id TEXT NOT NULL, -- (UUID for new deals, PropCID for old) - piece_cid TEXT NOT NULL, boost_deal BOOLEAN NOT NULL, legacy_deal BOOLEAN NOT NULL DEFAULT FALSE, @@ -73,11 +75,16 @@ CREATE TABLE market_piece_deal ( sp_id BIGINT NOT NULL, sector_num BIGINT NOT NULL, + piece_offset BIGINT NOT NULL, -- NOT NULL dropped in 20250505-market-mk20.sql + + -- piece_ref BIGINT (Added in 20250505-market-mk20.sql) - piece_offset BIGINT NOT NULL, + piece_cid TEXT NOT NULL, piece_length BIGINT NOT NULL, raw_size BIGINT NOT NULL, + -- Dropped both constraint and primary key in 20250505-market-mk20.sql + -- ADD PRIMARY KEY (id, sp_id, piece_cid, piece_length) (Added in 20250505-market-mk20.sql) primary key (sp_id, piece_cid, id), constraint market_piece_deal_identity_key unique (sp_id, id) @@ -227,6 +234,7 @@ CREATE TABLE market_direct_deals ( piece_cid TEXT NOT NULL, piece_size BIGINT NOT NULL, + -- raw_size BIGINT (Added in 20250505-market-mk20.sql) fast_retrieval BOOLEAN NOT NULL, announce_to_ipni BOOLEAN NOT NULL, diff --git a/harmony/harmonydb/sql/20240823-ipni.sql b/harmony/harmonydb/sql/20240823-ipni.sql index b9b9117c2..de14410c3 100644 --- a/harmony/harmonydb/sql/20240823-ipni.sql +++ b/harmony/harmonydb/sql/20240823-ipni.sql @@ -9,7 +9,7 @@ CREATE TABLE ipni ( order_number BIGSERIAL PRIMARY KEY, -- Unique increasing order number ad_cid TEXT NOT NULL, context_id BYTEA NOT NULL, -- abi.PieceInfo in Curio - -- metadata column in not required as Curio only supports one type of metadata(HTTP) + -- metadata BYTEA NOT NULL DEFAULT '\xa01200' (Added in 20250505-market-mk20.sql) is_rm BOOLEAN NOT NULL, -- skip added in 20241106-market-fixes.sql @@ -26,6 +26,8 @@ CREATE TABLE ipni ( piece_cid TEXT NOT NULL, -- For easy look up piece_size BIGINT NOT NULL, -- For easy look up + -- piece_cid_v2 TEXT (Added in 20250505-market-mk20.sql) -- For easy lookup + unique (ad_cid) ); @@ -56,7 +58,7 @@ CREATE TABLE ipni_head ( -- on-disk .car block headers or from data in the piece index database. CREATE TABLE ipni_chunks ( cid TEXT PRIMARY KEY, -- CID of the chunk - piece_cid TEXT NOT NULL, -- Related Piece CID + piece_cid TEXT NOT NULL, -- Related Piece CID V2 chunk_num INTEGER NOT NULL, -- Chunk number within the piece. Chunk 0 has no "next" link. first_cid TEXT, -- In case of db-based chunks, the CID of the first cid in the chunk start_offset BIGINT, -- In case of .car-based chunks, the offset in the .car file where the chunk starts @@ -87,6 +89,8 @@ CREATE TABLE ipni_task ( task_id BIGINT DEFAULT NULL, complete BOOLEAN DEFAULT FALSE, + -- id TEXT (Added in 20250505-market-mk20.sql) + PRIMARY KEY (provider, context_id, is_rm) ); @@ -175,24 +179,24 @@ BEGIN -- If a different is_rm exists for the same context_id and provider, insert the new task IF FOUND THEN - INSERT INTO ipni_task (sp_id, sector, reg_seal_proof, sector_offset, provider, context_id, is_rm, created_at, task_id, complete) - VALUES (_sp_id, _sector, _reg_seal_proof, _sector_offset, _provider, _context_id, _is_rm, TIMEZONE('UTC', NOW()), _task_id, FALSE); - RETURN; + INSERT INTO ipni_task (sp_id, sector, reg_seal_proof, sector_offset, provider, context_id, is_rm, created_at, task_id, complete) + VALUES (_sp_id, _sector, _reg_seal_proof, _sector_offset, _provider, _context_id, _is_rm, TIMEZONE('UTC', NOW()), _task_id, FALSE); + RETURN; END IF; - -- If no conflicting entry is found in ipni_task, check the latest ad in ipni table + -- If no conflicting entry is found in ipni_task, check the latest ad in ipni table SELECT is_rm INTO _latest_is_rm FROM ipni WHERE provider = _provider AND context_id = _context_id ORDER BY order_number DESC - LIMIT 1; + LIMIT 1; -- If the latest ad has the same is_rm value, raise an exception IF FOUND AND _latest_is_rm = _is_rm THEN RAISE EXCEPTION 'already published'; END IF; - -- If all conditions are met, insert the new task into ipni_task + -- If all conditions are met, insert the new task into ipni_task INSERT INTO ipni_task (sp_id, sector, reg_seal_proof, sector_offset, provider, context_id, is_rm, created_at, task_id, complete) VALUES (_sp_id, _sector, _reg_seal_proof, _sector_offset, _provider, _context_id, _is_rm, TIMEZONE('UTC', NOW()), _task_id, FALSE); END; diff --git a/harmony/harmonydb/sql/20250505-market-mk20.sql b/harmony/harmonydb/sql/20250505-market-mk20.sql new file mode 100644 index 000000000..303ba0979 --- /dev/null +++ b/harmony/harmonydb/sql/20250505-market-mk20.sql @@ -0,0 +1,879 @@ +-- Add raw_size column to mk12 deals to calculate pieceCidV2 +ALTER TABLE market_mk12_deals + ADD COLUMN raw_size BIGINT; + +-- Add raw_size column to mk12-ddo deals to calculate pieceCidV2 +ALTER TABLE market_direct_deals + ADD COLUMN raw_size BIGINT; + +-- Drop the existing primary key constraint for market_piece_metadata +ALTER TABLE market_piece_metadata +DROP CONSTRAINT market_piece_metadata_pkey; + +-- Drop the redundant UNIQUE constraint if it exists for market_piece_metadata +ALTER TABLE market_piece_metadata +DROP CONSTRAINT IF EXISTS market_piece_meta_identity_key; + +-- Add the new composite primary key for market_piece_metadata +ALTER TABLE market_piece_metadata + ADD PRIMARY KEY (piece_cid, piece_size); + +-- Drop the current primary key for market_piece_deal +ALTER TABLE market_piece_deal +DROP CONSTRAINT market_piece_deal_pkey; + +-- Drop the old UNIQUE constraint for market_piece_deal +ALTER TABLE market_piece_deal +DROP CONSTRAINT IF EXISTS market_piece_deal_identity_key; + +-- Add the new composite primary key for market_piece_deal +ALTER TABLE market_piece_deal + ADD PRIMARY KEY (id, sp_id, piece_cid, piece_length); + +-- Add a column to relate a piece park piece to mk20 deal +ALTER TABLE market_piece_deal +ADD COLUMN piece_ref BIGINT; + +-- Allow piece_offset to be null for PDP deals +ALTER TABLE market_piece_deal + ALTER COLUMN piece_offset DROP NOT NULL; + +-- Add column to skip scheduling piece_park. Used for upload pieces +ALTER TABLE parked_pieces + ADD COLUMN skip BOOLEAN NOT NULL DEFAULT FALSE; + +-- Add column piece_cid_v2 to IPNI table +ALTER TABLE ipni + ADD COLUMN piece_cid_v2 TEXT; + +-- Add metadata column to IPNI table which defaults to binary of IpfsGatewayHttp +ALTER TABLE ipni + ADD COLUMN metadata BYTEA NOT NULL DEFAULT '\xa01200'; + +-- The order_number column must be completely sequential +ALTER SEQUENCE ipni_order_number_seq CACHE 1; + +-- This function is used to insert piece metadata and piece deal (piece indexing) +-- This makes it easy to keep the logic of how table is updated and fast (in DB). +CREATE OR REPLACE FUNCTION process_piece_deal( + _id TEXT, + _piece_cid TEXT, + _boost_deal BOOLEAN, + _sp_id BIGINT, + _sector_num BIGINT, + _piece_offset BIGINT, + _piece_length BIGINT, -- padded length + _raw_size BIGINT, + _indexed BOOLEAN, + _piece_ref BIGINT DEFAULT NULL, + _legacy_deal BOOLEAN DEFAULT FALSE, + _chain_deal_id BIGINT DEFAULT 0 +) + RETURNS VOID AS $$ +BEGIN + -- Insert or update the market_piece_metadata table + INSERT INTO market_piece_metadata (piece_cid, piece_size, indexed) + VALUES (_piece_cid, _piece_length, _indexed) + ON CONFLICT (piece_cid, piece_size) DO UPDATE SET + indexed = CASE + WHEN market_piece_metadata.indexed = FALSE THEN EXCLUDED.indexed + ELSE market_piece_metadata.indexed + END; + + -- Insert into the market_piece_deal table + INSERT INTO market_piece_deal ( + id, piece_cid, boost_deal, legacy_deal, chain_deal_id, + sp_id, sector_num, piece_offset, piece_length, raw_size, piece_ref + ) VALUES ( + _id, _piece_cid, _boost_deal, _legacy_deal, _chain_deal_id, + _sp_id, _sector_num, _piece_offset, _piece_length, _raw_size, _piece_ref + ) ON CONFLICT (id, sp_id, piece_cid, piece_length) DO NOTHING; + +END; +$$ LANGUAGE plpgsql; + +-- Add ID column to ipni_task table +ALTER TABLE ipni_task + ADD COLUMN id TEXT; + +-- Function to create ipni tasks +CREATE OR REPLACE FUNCTION insert_ipni_task( + _id TEXT, + _sp_id BIGINT, + _sector BIGINT, + _reg_seal_proof INT, + _sector_offset BIGINT, + _context_id BYTEA, + _is_rm BOOLEAN, + _provider TEXT, + _task_id BIGINT DEFAULT NULL +) RETURNS VOID AS $$ +DECLARE +_existing_is_rm BOOLEAN; + _latest_is_rm BOOLEAN; +BEGIN + -- Check if ipni_task has the same context_id and provider with a different is_rm value + SELECT is_rm INTO _existing_is_rm + FROM ipni_task + WHERE provider = _provider AND context_id = _context_id AND is_rm != _is_rm + LIMIT 1; + + -- If a different is_rm exists for the same context_id and provider, insert the new task + IF FOUND THEN + INSERT INTO ipni_task (sp_id, id, sector, reg_seal_proof, sector_offset, provider, context_id, is_rm, created_at, task_id, complete) + VALUES (_sp_id, _id, _sector, _reg_seal_proof, _sector_offset, _provider, _context_id, _is_rm, TIMEZONE('UTC', NOW()), _task_id, FALSE); + RETURN; + END IF; + + -- If no conflicting entry is found in ipni_task, check the latest ad in ipni table + SELECT is_rm INTO _latest_is_rm + FROM ipni + WHERE provider = _provider AND context_id = _context_id + ORDER BY order_number DESC + LIMIT 1; + + -- If the latest ad has the same is_rm value, raise an exception + IF FOUND AND _latest_is_rm = _is_rm THEN + RAISE EXCEPTION 'already published'; + END IF; + + -- If all conditions are met, insert the new task into ipni_task + INSERT INTO ipni_task (sp_id, id, sector, reg_seal_proof, sector_offset, provider, context_id, is_rm, created_at, task_id, complete) + VALUES (_sp_id, _id, _sector, _reg_seal_proof, _sector_offset, _provider, _context_id, _is_rm, TIMEZONE('UTC', NOW()), _task_id, FALSE); +END; +$$ LANGUAGE plpgsql; + + +-- Update raw_size for existing deals (One time backfill migration) +BEGIN; + UPDATE market_mk12_deals d + SET raw_size = mpd.raw_size + FROM market_piece_deal mpd + WHERE d.uuid = mpd.id; + + UPDATE market_direct_deals d + SET raw_size = mpd.raw_size + FROM market_piece_deal mpd + WHERE d.uuid = mpd.id; + + UPDATE market_mk12_deals d + SET raw_size = p.raw_size + FROM market_mk12_deal_pipeline p + WHERE d.uuid = p.uuid + AND d.raw_size IS NULL + AND p.raw_size IS NOT NULL; + + UPDATE market_direct_deals d + SET raw_size = p.raw_size + FROM market_mk12_deal_pipeline p + WHERE d.uuid = p.uuid + AND d.raw_size IS NULL + AND p.raw_size IS NOT NULL; +COMMIT; + +-- This is main MK20 Deal table. Rows are added per deal and some +-- modification is allowed later +CREATE TABLE market_mk20_deal ( + created_at TIMESTAMPTZ NOT NULL DEFAULT TIMEZONE('UTC', NOW()), + id TEXT PRIMARY KEY, + client TEXT NOT NULL, + + piece_cid_v2 TEXT, + + data JSONB NOT NULL DEFAULT 'null', + + ddo_v1 JSONB NOT NULL DEFAULT 'null', + retrieval_v1 JSONB NOT NULL DEFAULT 'null', + pdp_v1 JSONB NOT NULL DEFAULT 'null' +); +COMMENT ON COLUMN market_mk20_deal.id IS 'This is ULID TEXT'; +COMMENT ON COLUMN market_mk20_deal.client IS 'Client must always be text as this can be a non Filecoin address like ed25519'; + +-- This is main pipeline table for PoRep processing of MK20 deals +CREATE TABLE market_mk20_pipeline ( + created_at TIMESTAMPTZ NOT NULL DEFAULT TIMEZONE('UTC', NOW()), + id TEXT NOT NULL, + sp_id BIGINT NOT NULL, + contract TEXT NOT NULL, + client TEXT NOT NULL, + piece_cid_v2 TEXT NOT NULL, + piece_cid TEXT NOT NULL, -- This is pieceCid V1 to allow easy table lookups + piece_size BIGINT NOT NULL, + raw_size BIGINT NOT NULL, + offline BOOLEAN NOT NULL, + url TEXT DEFAULT NULL, + indexing BOOLEAN NOT NULL, + announce BOOLEAN NOT NULL, + allocation_id BIGINT DEFAULT NULL, + duration BIGINT NOT NULL, + piece_aggregation INT NOT NULL DEFAULT 0, -- This is set when user sends a aggregated piece. It is also set as `deal_aggregation` when deal is aggregated on SP side. + + started BOOLEAN DEFAULT FALSE, + + downloaded BOOLEAN DEFAULT FALSE, + + commp_task_id BIGINT DEFAULT NULL, + after_commp BOOLEAN DEFAULT FALSE, + + deal_aggregation INT NOT NULL DEFAULT 0, + aggr_index BIGINT DEFAULT 0, + agg_task_id BIGINT DEFAULT NULL, + aggregated BOOLEAN DEFAULT FALSE, + + sector BIGINT DEFAULT NULL, + reg_seal_proof INT DEFAULT NULL, + sector_offset BIGINT DEFAULT NULL, -- padded offset + + sealed BOOLEAN DEFAULT FALSE, + + indexing_created_at TIMESTAMPTZ DEFAULT NULL, + indexing_task_id BIGINT DEFAULT NULL, + indexed BOOLEAN DEFAULT FALSE, + + complete BOOLEAN NOT NULL DEFAULT FALSE, + + PRIMARY KEY (id, aggr_index) +); +COMMENT ON COLUMN market_mk20_pipeline.piece_aggregation IS 'This is set when user sends a aggregated piece. It is also set as `deal_aggregation` when deal is aggregated on SP side.'; +COMMENT ON COLUMN market_mk20_pipeline.deal_aggregation IS 'This is set when user sends a deal with aggregated source. This value is passed to piece_aggregation when aggregation is finished and a single piece remains'; + +-- This table is used to hold MK20 deals waiting for PoRep pipeline +-- to process. This allows disconnecting the need to immediately process +-- deals as received and allow upload later strategy to work +CREATE TABLE market_mk20_pipeline_waiting ( + id TEXT PRIMARY KEY +); + +-- This table is used to keep track of deals which need data upload. +-- A separate table helps easier status check, chunked+serial upload support +CREATE TABLE market_mk20_upload_waiting ( + id TEXT PRIMARY KEY, + chunked BOOLEAN DEFAULT NULL, + ref_id BIGINT DEFAULT NULL, + ready_at TIMESTAMPTZ DEFAULT NULL +); + +-- This table help disconnected downloads from main PoRep/PDP pipelines +-- It helps with allowing multiple downloads per deal i.e. server side aggregation. +-- This also allows us to reuse ongoing downloads within the same deal aggregation. +-- It also allows using a common download pipeline for both PoRep and PDP. +CREATE TABLE market_mk20_download_pipeline ( + id TEXT NOT NULL, + product TEXT NOT NULL, -- This allows us to run multiple refs per product for easier lifecycle management + piece_cid_v2 TEXT NOT NULL, + ref_ids BIGINT[] NOT NULL, + PRIMARY KEY (id, product, piece_cid_v2) +); + +-- Offline URLs for PoRep deals. +CREATE TABLE market_mk20_offline_urls ( + id TEXT NOT NULL, + piece_cid_v2 TEXT NOT NULL, + url TEXT NOT NULL, + headers jsonb NOT NULL DEFAULT '{}', + PRIMARY KEY (id, piece_cid_v2) +); + +-- This table tracks the chunk upload progress for a MK20 deal. Common for both +-- PoRep and PDP +CREATE TABLE market_mk20_deal_chunk ( + id TEXT not null, + chunk INT not null, + chunk_size BIGINT not null, + ref_id BIGINT DEFAULT NULL, + complete BOOLEAN DEFAULT FALSE, + completed_at TIMESTAMPTZ, + finalize BOOLEAN DEFAULT FALSE, + finalize_task_id BIGINT DEFAULT NULL, + PRIMARY KEY (id, chunk) +); + +-- MK20 product and their status table +CREATE TABLE market_mk20_products ( + name TEXT PRIMARY KEY, + enabled BOOLEAN DEFAULT TRUE +); + +-- MK20 supported data sources and their status table +CREATE TABLE market_mk20_data_source ( + name TEXT PRIMARY KEY, + enabled BOOLEAN DEFAULT TRUE +); + +-- Add products and data sources to table +INSERT INTO market_mk20_products (name, enabled) VALUES ('ddo_v1', TRUE); +INSERT INTO market_mk20_products (name, enabled) VALUES ('retrieval_v1', TRUE); +INSERT INTO market_mk20_products (name, enabled) VALUES ('pdp_v1', TRUE); +INSERT INTO market_mk20_data_source (name, enabled) VALUES ('http', TRUE); +INSERT INTO market_mk20_data_source (name, enabled) VALUES ('aggregate', TRUE); +INSERT INTO market_mk20_data_source (name, enabled) VALUES ('offline', TRUE); +INSERT INTO market_mk20_data_source (name, enabled) VALUES ('put', TRUE); + +-- This function sets an upload completion time. It is used to removed +-- upload for deal which are not finalized in 1 hour so we don't waste space. +CREATE OR REPLACE FUNCTION set_ready_at_for_serial_upload() +RETURNS TRIGGER AS $$ +BEGIN + -- Transition into "serial ready" state: chunked=false AND ref_id IS NOT NULL + IF NEW.chunked IS FALSE + AND NEW.ref_id IS NOT NULL + AND OLD.ready_at IS NULL + AND NOT (OLD.chunked IS FALSE AND OLD.ref_id IS NOT NULL) THEN + NEW.ready_at := NOW() AT TIME ZONE 'UTC'; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trg_ready_at_serial + BEFORE UPDATE OF ref_id, chunked ON market_mk20_upload_waiting + FOR EACH ROW + EXECUTE FUNCTION set_ready_at_for_serial_upload(); + +-- This function sets an upload completion time. It is used to removed +-- upload for deal which are not finalized in 1 hour so we don't waste space. +CREATE OR REPLACE FUNCTION set_ready_at_when_all_chunks_complete() +RETURNS TRIGGER AS $$ +BEGIN + -- Only react when a chunk transitions to complete = true + IF (TG_OP = 'UPDATE' OR TG_OP = 'INSERT') AND NEW.complete IS TRUE THEN + -- If no incomplete chunks remain, set ready_at once + IF NOT EXISTS ( + SELECT 1 FROM market_mk20_deal_chunk + WHERE id = NEW.id AND (complete IS NOT TRUE) + ) THEN + UPDATE market_mk20_upload_waiting + SET ready_at = NOW() AT TIME ZONE 'UTC' + WHERE id = NEW.id + AND chunked = true + AND ready_at IS NULL; + END IF; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + + +CREATE TRIGGER trg_ready_at_chunks_update + AFTER INSERT OR UPDATE OF complete ON market_mk20_deal_chunk + FOR EACH ROW + EXECUTE FUNCTION set_ready_at_when_all_chunks_complete(); + +-- This function triggers a download for an offline piece. +-- It is different from MK1.2 PoRep pipeline as it downloads the offline pieces +-- locally. This is to allow serving retrievals with piece park. +CREATE OR REPLACE FUNCTION process_offline_download( + _id TEXT, + _piece_cid_v2 TEXT, + _piece_cid TEXT, + _piece_size BIGINT, + _product TEXT +) RETURNS BOOLEAN AS $$ +DECLARE + _url TEXT; + _headers JSONB; + _raw_size BIGINT; + _deal_aggregation INT; + _piece_id BIGINT; + _ref_id BIGINT; +BEGIN + -- 1. Early exit if no offline match found + SELECT url, headers + INTO _url, _headers + FROM market_mk20_offline_urls + WHERE id = _id AND piece_cid_v2 = _piece_cid_v2; + + IF NOT FOUND THEN + RETURN FALSE; + END IF; + + -- 2. Get deal_aggregation flag + SELECT deal_aggregation + INTO _deal_aggregation + FROM market_mk20_pipeline + WHERE id = _id AND piece_cid_v2 = _piece_cid_v2 LIMIT 1; + + -- 3. Look for an existing piece + SELECT id + INTO _piece_id + FROM parked_pieces + WHERE piece_cid = _piece_cid AND piece_padded_size = _piece_size; + + -- 4. Insert piece if it is not found + IF NOT FOUND THEN + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term) + VALUES (_piece_cid, _piece_size, _raw_size, NOT (_deal_aggregation > 0)) + RETURNING id INTO _piece_id; + END IF; + + -- 5. Insert piece ref + INSERT INTO parked_piece_refs (piece_id, data_url, data_headers, long_term) + VALUES (_piece_id, _url, _headers, NOT (_deal_aggregation > 0)) + RETURNING ref_id INTO _ref_id; + + -- 6. Insert or update download pipeline with ref_id + INSERT INTO market_mk20_download_pipeline (id, piece_cid_v2, product, ref_ids) + VALUES (_id, _piece_cid_v2, _product, ARRAY[_ref_id]) + ON CONFLICT (id, piece_cid_v2, product) DO UPDATE + SET ref_ids = ( + SELECT ARRAY( + SELECT DISTINCT r + FROM unnest(market_mk20_download_pipeline.ref_ids || excluded.ref_ids) AS r + ) + ); + + -- 7. Mark the deal as started + UPDATE market_mk20_pipeline + SET started = TRUE + WHERE id = _id AND piece_cid_v2 = _piece_cid_v2 AND started = FALSE; + + RETURN TRUE; +END; +$$ LANGUAGE plpgsql; + +-- Main DataSet table for PDP +CREATE TABLE pdp_data_set ( + id BIGINT PRIMARY KEY, -- on-chain dataset id + client TEXT NOT NULL, -- client wallet which requested this dataset + + -- updated when a challenge is requested (either by first dataset add or by invokes of nextProvingPeriod) + -- initially NULL on fresh dataset + prev_challenge_request_epoch BIGINT, + + -- task invoking nextProvingPeriod, the task should be spawned any time prove_at_epoch+challenge_window is in the past + challenge_request_task_id BIGINT REFERENCES harmony_task(id) ON DELETE SET NULL, + + -- nextProvingPeriod message hash, when the message lands prove_task_id will be spawned and + -- this value will be set to NULL + challenge_request_msg_hash TEXT, + + -- the proving period for this proofset and the challenge window duration + proving_period BIGINT, + challenge_window BIGINT, + + -- the epoch at which the next challenge window starts and proofs can be submitted + -- initialized to NULL indicating a special proving period init task handles challenge generation + prove_at_epoch BIGINT, + + -- flag indicating that the proving period is ready for init. Currently set after first add + -- Set to true after first root add + init_ready BOOLEAN NOT NULL DEFAULT FALSE, + + create_deal_id TEXT NOT NULL, -- mk20 deal ID for creating this data_set + create_message_hash TEXT NOT NULL, + + removed BOOLEAN DEFAULT FALSE, + + remove_deal_id TEXT DEFAULT NULL, -- mk20 deal ID for removing this data_set + remove_message_hash TEXT DEFAULT NULL, + + unique (create_deal_id), + unique (remove_deal_id) +); + +-- DataSet create table governs the DataSet create task +CREATE TABLE pdp_data_set_create ( + id TEXT PRIMARY KEY, -- This is Market V2 Deal ID for lookup and response + client TEXT NOT NULL, + + record_keeper TEXT NOT NULL, + extra_data BYTEA, + + task_id BIGINT DEFAULT NULL, + tx_hash TEXT DEFAULT NULL +); + +-- DataSet delete table governs the DataSet delete task +CREATE TABLE pdp_data_set_delete ( + id TEXT PRIMARY KEY, -- This is Market V2 Deal ID for lookup and response + client TEXT NOT NULL, + + set_id BIGINT NOT NULL, + extra_data BYTEA, + + task_id BIGINT DEFAULT NULL, + tx_hash TEXT DEFAULT NULL +); + +-- This table governs the delete piece tasks +CREATE TABLE pdp_piece_delete ( + id TEXT PRIMARY KEY, -- This is Market V2 Deal ID for lookup and response + client TEXT NOT NULL, + + set_id BIGINT NOT NULL, + pieces BIGINT[] NOT NULL, + extra_data BYTEA, + + task_id BIGINT DEFAULT NULL, + tx_hash TEXT DEFAULT NULL +); + +-- Main DataSet Piece table. Any and all pieces ever added by SP must be part of this table +CREATE TABLE pdp_dataset_piece ( + data_set_id BIGINT NOT NULL, -- pdp_data_sets.id + client TEXT NOT NULL, + + piece_cid_v2 TEXT NOT NULL, -- root cid (piececid v2) + + piece BIGINT DEFAULT NULL, -- on-chain index of the piece in the pieceCids sub-array + + piece_ref BIGINT NOT NULL, -- piece_ref_id + + add_deal_id TEXT NOT NULL, -- mk20 deal ID for adding this root to dataset + add_message_hash TEXT NOT NULL, + add_message_index BIGINT NOT NULL, -- index of root in the add message + + removed BOOLEAN DEFAULT FALSE, + remove_deal_id TEXT DEFAULT NULL, -- mk20 deal ID for removing this root from dataset + remove_message_hash TEXT DEFAULT NULL, + remove_message_index BIGINT DEFAULT NULL, + + PRIMARY KEY (data_set_id, piece) +); + +CREATE TABLE pdp_pipeline ( + created_at TIMESTAMPTZ NOT NULL DEFAULT TIMEZONE('UTC', NOW()), + + id TEXT NOT NULL, + client TEXT NOT NULL, + + piece_cid_v2 TEXT NOT NULL, -- v2 piece_cid + + data_set_id BIGINT NOT NULL, + + extra_data BYTEA, + + piece_ref BIGINT DEFAULT NULL, + + downloaded BOOLEAN DEFAULT FALSE, + + commp_task_id BIGINT DEFAULT NULL, + after_commp BOOLEAN DEFAULT FALSE, + + deal_aggregation INT NOT NULL DEFAULT 0, + aggr_index BIGINT DEFAULT 0, + agg_task_id BIGINT DEFAULT NULL, + aggregated BOOLEAN DEFAULT FALSE, + + add_piece_task_id BIGINT DEFAULT NULL, + after_add_piece BOOLEAN DEFAULT FALSE, + + add_message_hash TEXT, + add_message_index BIGINT NOT NULL DEFAULT 0, -- index of root in the add message + + after_add_piece_msg BOOLEAN DEFAULT FALSE, + + save_cache_task_id BIGINT DEFAULT NULL, + after_save_cache BOOLEAN DEFAULT FALSE, + + indexing BOOLEAN DEFAULT FALSE, + indexing_created_at TIMESTAMPTZ DEFAULT NULL, + indexing_task_id BIGINT DEFAULT NULL, + indexed BOOLEAN DEFAULT FALSE, + + announce BOOLEAN DEFAULT FALSE, + announce_payload BOOLEAN DEFAULT FALSE, + + announced BOOLEAN DEFAULT FALSE, + announced_payload BOOLEAN DEFAULT FALSE, + + complete BOOLEAN DEFAULT FALSE, + + PRIMARY KEY (id, aggr_index) +); + +-- This function is used to mark a piece as downloaded in pdp_pipeline +-- A deal with multiple HTTP sources will have multiple ref_ids, +-- and download is handled by market_mk20_download_pipeline table +-- We add ref_id to pdp_pipeline once download is successful. +create or replace function mk20_pdp_mark_downloaded(_product text) +returns integer +language plpgsql +as $$ +declare + updated_count int := 0; +begin + with candidates as ( + select p.id, p.piece_cid_v2, dp.ref_ids + from pdp_pipeline p + join market_mk20_download_pipeline dp + on dp.id = p.id + and dp.piece_cid_v2 = p.piece_cid_v2 + and dp.product = _product + where p.piece_ref is null + ), + picked as ( + -- choose ONE completed ref_id from the array for each (id,piece_cid_v2) + select c.id, c.piece_cid_v2, c.ref_ids, ch.ref_id as chosen_ref + from candidates c + cross join lateral ( + select pr.ref_id + from unnest(c.ref_ids) as r(ref_id) + join parked_piece_refs pr on pr.ref_id = r.ref_id + join parked_pieces pp on pp.id = pr.piece_id + where pp.complete = true + limit 1 + ) ch + ), + del_other_refs as ( + delete from parked_piece_refs pr + using picked + where pr.ref_id = any(picked.ref_ids) + and pr.ref_id != picked.chosen_ref + returning 1 + ), + del_download_rows as ( + delete from market_mk20_download_pipeline dp + using picked + where dp.id = picked.id + and dp.piece_cid_v2 = picked.piece_cid_v2 + and dp.product = _product + returning 1 + ), + upd as ( + update pdp_pipeline p + set downloaded = true, + piece_ref = picked.chosen_ref + from picked + where p.id = picked.id + and p.piece_cid_v2 = picked.piece_cid_v2 + returning 1 + ) + select count(*) into updated_count from upd; + + return updated_count; +end; +$$; + +CREATE TABLE market_mk20_clients ( + client TEXT PRIMARY KEY, + allowed BOOLEAN DEFAULT TRUE +); + +CREATE TABLE pdp_proving_tasks ( + data_set_id BIGINT NOT NULL, -- pdp_data_set.id + task_id BIGINT NOT NULL, -- harmony_task task ID + + PRIMARY KEY (data_set_id, task_id), + FOREIGN KEY (data_set_id) REFERENCES pdp_data_set(id) ON DELETE CASCADE, + FOREIGN KEY (task_id) REFERENCES harmony_task(id) ON DELETE CASCADE +); + +-- IPNI pipeline is kept separate from rest for robustness +-- and reuse. This allows for removing, recreating ads using CLI. +CREATE TABLE pdp_ipni_task ( + context_id BYTEA NOT NULL, + is_rm BOOLEAN NOT NULL, + + id TEXT NOT NULL, + + provider TEXT NOT NULL, + + created_at TIMESTAMPTZ NOT NULL DEFAULT TIMEZONE('UTC', NOW()), + task_id BIGINT DEFAULT NULL, + complete BOOLEAN DEFAULT FALSE, + + PRIMARY KEY (context_id, is_rm) +); + +-- Function to create ipni tasks +CREATE OR REPLACE FUNCTION insert_pdp_ipni_task( + _context_id BYTEA, + _is_rm BOOLEAN, + _id TEXT, + _provider TEXT, + _task_id BIGINT DEFAULT NULL +) RETURNS VOID AS $$ +DECLARE + _existing_is_rm BOOLEAN; + _latest_is_rm BOOLEAN; +BEGIN + -- Check if ipni_task has the same context_id and provider with a different is_rm value + SELECT is_rm INTO _existing_is_rm + FROM pdp_ipni_task + WHERE provider = _provider AND context_id = _context_id AND is_rm != _is_rm + LIMIT 1; + + -- If a different is_rm exists for the same context_id and provider, insert the new task + IF FOUND THEN + INSERT INTO pdp_ipni_task (context_id, is_rm, id, provider, task_id, created_at) + VALUES (_context_id, _is_rm, _id, _provider, _task_id); + RETURN; + END IF; + + -- If no conflicting entry is found in ipni_task, check the latest ad in ipni table + SELECT is_rm INTO _latest_is_rm + FROM ipni + WHERE provider = _provider AND context_id = _context_id + ORDER BY order_number DESC + LIMIT 1; + + -- If the latest ad has the same is_rm value, raise an exception + IF FOUND AND _latest_is_rm = _is_rm THEN + RAISE EXCEPTION 'already published'; + END IF; + + -- If all conditions are met, insert the new task into ipni_task + INSERT INTO pdp_ipni_task (context_id, is_rm, id, provider, task_id) + VALUES (_context_id, _is_rm, _id, _provider, _task_id); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION insert_ad_and_update_head( + _ad_cid TEXT, + _context_id BYTEA, + _metadata BYTEA, + _piece_cid_v2 TEXT, + _piece_cid TEXT, + _piece_size BIGINT, + _is_rm BOOLEAN, + _provider TEXT, + _addresses TEXT, + _signature BYTEA, + _entries TEXT +) RETURNS VOID AS $$ +DECLARE + _previous TEXT; + _new_order BIGINT; +BEGIN + -- Determine the previous ad_cid in the chain for this provider + SELECT head INTO _previous + FROM ipni_head + WHERE provider = _provider; + + -- Insert the new ad into the ipni table with an automatically assigned order_number + INSERT INTO ipni (ad_cid, context_id, metadata, is_rm, previous, provider, addresses, signature, entries, piece_cid_v2, piece_cid, piece_size) + VALUES (_ad_cid, _context_id, _metadata, _is_rm, _previous, _provider, _addresses, _signature, _entries, _piece_cid_v2, _piece_cid, _piece_size); + + -- Update the ipni_head table to set the new ad as the head of the chain + INSERT INTO ipni_head (provider, head) + VALUES (_provider, _ad_cid) + ON CONFLICT (provider) DO UPDATE SET head = EXCLUDED.head; + +END; +$$ LANGUAGE plpgsql; + + +CREATE TABLE piece_cleanup ( + id TEXT NOT NULL, + piece_cid_v2 TEXT NOT NULL, + pdp BOOLEAN NOT NULL, + + task_id BIGINT, + + PRIMARY KEY (id, pdp) +); + +-- This functions remove the row from market_piece_deal and then goes on to +-- clean up market_piece_metadata and parked_piece_refs as required +CREATE OR REPLACE FUNCTION remove_piece_deal( + _id TEXT, + _sp_id BIGINT, + _piece_cid TEXT, + _piece_length BIGINT +) RETURNS VOID AS $$ +DECLARE + v_piece_ref BIGINT; + v_remaining BIGINT; +BEGIN + -- 1) Delete the exact deal row and capture piece_ref + DELETE FROM market_piece_deal + WHERE id = _id + AND sp_id = _sp_id + AND piece_cid = _piece_cid + AND piece_length = _piece_length + RETURNING piece_ref + INTO v_piece_ref; + + IF NOT FOUND THEN + RAISE EXCEPTION + 'market_piece_deal not found for id=%, sp_id=%, piece_cid=%, piece_length=%', + _id, _sp_id, _piece_cid, _piece_length; + END IF; + + -- 2) If no other deals reference the same piece, remove metadata + SELECT COUNT(*) + INTO v_remaining + FROM market_piece_deal + WHERE piece_cid = _piece_cid + AND piece_length = _piece_length; + + IF v_remaining = 0 THEN + DELETE FROM market_piece_metadata + WHERE piece_cid = _piece_cid + AND piece_size = _piece_length; + -- (DELETE is idempotent even if no row exists) + END IF; + + -- 3) If present, remove the parked piece reference + IF v_piece_ref IS NOT NULL THEN + DELETE FROM parked_piece_refs + WHERE ref_id = v_piece_ref; + -- (FKs from pdp_* tables will cascade/SET NULL per their definitions) + END IF; +END; +$$ LANGUAGE plpgsql; + + +create or replace function mk20_ddo_mark_downloaded(_product text) +returns integer +language plpgsql +as $$ +declare +updated_count int := 0; +begin + with candidates as ( + select p.id, p.piece_cid_v2, dp.ref_ids + from market_mk20_pipeline p + join market_mk20_download_pipeline dp + on dp.id = p.id + and dp.piece_cid_v2 = p.piece_cid_v2 + and dp.product = _product + where p.url is null + ), + picked as ( + -- choose ONE completed ref_id from the array for each (id,piece_cid_v2) + select c.id, c.piece_cid_v2, c.ref_ids, ch.ref_id as chosen_ref + from candidates c + cross join lateral ( + select pr.ref_id + from unnest(c.ref_ids) as r(ref_id) + join parked_piece_refs pr on pr.ref_id = r.ref_id + join parked_pieces pp on pp.id = pr.piece_id + where pp.complete = true + limit 1 + ) ch + ), + del_other_refs as ( + delete from parked_piece_refs pr + using picked + where pr.ref_id = any(picked.ref_ids) + and pr.ref_id != picked.chosen_ref + returning 1 + ), + del_download_rows as ( + delete from market_mk20_download_pipeline dp + using picked + where dp.id = picked.id + and dp.piece_cid_v2 = picked.piece_cid_v2 + and dp.product = _product + returning 1 + ), + upd as ( + update market_mk20_pipeline p + set downloaded = true, + url = 'pieceref:' || picked.chosen_ref::text + from picked + where p.id = picked.id + and p.piece_cid_v2 = picked.piece_cid_v2 + returning 1 + ) + select count(*) into updated_count from upd; + + return updated_count; +end; +$$; + + diff --git a/itests/curio_test.go b/itests/curio_test.go index 2e09d553f..da6305628 100644 --- a/itests/curio_test.go +++ b/itests/curio_test.go @@ -80,7 +80,8 @@ func TestCurioHappyPath(t *testing.T) { defer db.ITestDeleteAll() - idxStore, err := indexstore.NewIndexStore([]string{envElse("CURIO_HARMONYDB_HOSTS", "127.0.0.1")}, 9042, config.DefaultCurioConfig()) + idxStore := indexstore.NewIndexStore([]string{envElse("CURIO_HARMONYDB_HOSTS", "127.0.0.1")}, 9042, config.DefaultCurioConfig()) + err = idxStore.Start(ctx, true) require.NoError(t, err) var titles []string diff --git a/itests/pdp_prove_test.go b/itests/pdp_prove_test.go new file mode 100644 index 000000000..88aea6ae7 --- /dev/null +++ b/itests/pdp_prove_test.go @@ -0,0 +1,159 @@ +package itests + +import ( + "io" + "math/rand" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/lib/proof" + "github.com/filecoin-project/curio/lib/testutils" + "github.com/filecoin-project/curio/pdp/contract" + "github.com/filecoin-project/curio/tasks/pdp" + + "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" +) + +// TestPDPProving verifies the functionality of generating and validating PDP proofs with a random file created in a temporary directory. +func TestPDPProving(t *testing.T) { + dir := t.TempDir() + + rawSize := int64(8323072) + //rawSize := int64(7 * 1024 * 1024 * 1024) + pieceSize := padreader.PaddedSize(uint64(rawSize)).Padded() + + // Create temporary file + fileStr, err := testutils.CreateRandomTmpFile(dir, rawSize) + require.NoError(t, err) + + defer func() { + _ = os.Remove(fileStr) + }() + + f, err := os.Open(fileStr) + require.NoError(t, err) + + stat, err := f.Stat() + require.NoError(t, err) + require.Equal(t, stat.Size(), rawSize) + + defer func() { + _ = f.Close() + }() + + t.Logf("File Size: %d", stat.Size()) + + // Total number of leafs + numberOfLeafs := pieceSize.Unpadded() / 32 + + // Do commP and save the snapshot layer + cp := pdp.NewCommPWithSizeForTest(uint64(rawSize)) + _, err = io.Copy(cp, f) + require.NoError(t, err) + + digest, psize, layerIdx, layer, err := cp.DigestWithSnapShot() + require.NoError(t, err) + + require.Equal(t, abi.PaddedPieceSize(psize), pieceSize) + + t.Logf("Digest: %x", digest) + t.Logf("PieceSize: %d", psize) + t.Logf("LayerIdx: %d", layerIdx) + t.Logf("Number of Nodes in snapshot layer: %d", len(layer)) + t.Logf("Total Number of Leafs: %d", numberOfLeafs) + + // Generate challenge leaf + challenge := int64(rand.Intn(int(numberOfLeafs))) + + t.Logf("Challenge: %d", challenge) + + // Calculate start leaf and snapshot leaf indexes + leavesPerNode := int64(1) << layerIdx + snapshotNodeIndex := challenge >> layerIdx + startLeaf := snapshotNodeIndex << layerIdx + t.Logf("Leaves per Node: %d", leavesPerNode) + t.Logf("Start Leaf: %d", startLeaf) + t.Logf("Snapshot Node Index: %d", snapshotNodeIndex) + + snapNode := layer[snapshotNodeIndex] + + // Convert tree-based leaf range to file-based offset/length + offset := int64(abi.PaddedPieceSize(startLeaf * 32).Unpadded()) + length := int64(abi.PaddedPieceSize(leavesPerNode * 32).Unpadded()) + + t.Logf("Offset: %d", offset) + t.Logf("Length: %d", length) + + // Compute padded size to build Merkle tree + subrootSize := padreader.PaddedSize(uint64(length)).Padded() + t.Logf("Subroot Size: %d", subrootSize) + + _, err = f.Seek(0, io.SeekStart) + require.NoError(t, err) + + dataReader := io.NewSectionReader(f, offset, length) + + _, err = f.Seek(offset, io.SeekStart) + require.NoError(t, err) + + fileRemaining := stat.Size() - offset + + t.Logf("File Remaining: %d", fileRemaining) + t.Logf("Is Padding: %t", fileRemaining < length) + + var data io.Reader + if fileRemaining < length { + data = io.MultiReader(dataReader, nullreader.NewNullReader(abi.UnpaddedPieceSize(int64(subrootSize.Unpadded())-fileRemaining))) + } else { + data = dataReader + } + + memtree, err := proof.BuildSha254Memtree(data, subrootSize.Unpadded()) + require.NoError(t, err) + + // Get challenge leaf in subTree + subTreeChallenge := challenge - startLeaf + + // Generate merkle proof for subTree + subTreeProof, err := proof.MemtreeProof(memtree, subTreeChallenge) + require.NoError(t, err) + + // Verify that subTree root is same as snapNode hash + require.Equal(t, subTreeProof.Root, snapNode.Hash) + + // Arrange snapshot layer into a byte array + var layerBytes []byte + for _, node := range layer { + layerBytes = append(layerBytes, node.Hash[:]...) + } + + t.Logf("Layer Bytes: %d", len(layerBytes)) + + // Create subTree from snapshot to commP (root) + mtree, err := proof.BuildSha254MemtreeFromSnapshot(layerBytes) + require.NoError(t, err) + + // Generate merkle proof from snapShot node to commP + proofs, err := proof.MemtreeProof(mtree, snapshotNodeIndex) + require.NoError(t, err) + + var digest32 [32]byte + copy(digest32[:], digest[:]) + + // verify that root and commP match + require.Equal(t, proofs.Root, digest32) + rd := proofs.Root + + out := contract.IPDPTypesProof{ + Leaf: subTreeProof.Leaf, + Proof: append(subTreeProof.Proof, proofs.Proof...), + } + + verified := pdp.Verify(out, rd, uint64(challenge)) + require.True(t, verified) +} diff --git a/lib/cachedreader/cachedreader.go b/lib/cachedreader/cachedreader.go index 1bfaa2b98..179eb6862 100644 --- a/lib/cachedreader/cachedreader.go +++ b/lib/cachedreader/cachedreader.go @@ -12,12 +12,15 @@ import ( "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/jellydator/ttlcache/v2" + "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/commcidv2" "github.com/filecoin-project/curio/lib/pieceprovider" "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/market/indexstore" ) var ErrNoDeal = errors.New("no deals found") @@ -36,13 +39,15 @@ type CachedPieceReader struct { sectorReader *pieceprovider.SectorReader pieceParkReader *pieceprovider.PieceParkReader + idxStor *indexstore.IndexStore + pieceReaderCacheMu sync.Mutex pieceReaderCache *ttlcache.Cache // Cache for successful readers (10 minutes with TTL extension) pieceErrorCacheMu sync.Mutex pieceErrorCache *ttlcache.Cache // Cache for errors (5 seconds without TTL extension) } -func NewCachedPieceReader(db *harmonydb.DB, sectorReader *pieceprovider.SectorReader, pieceParkReader *pieceprovider.PieceParkReader) *CachedPieceReader { +func NewCachedPieceReader(db *harmonydb.DB, sectorReader *pieceprovider.SectorReader, pieceParkReader *pieceprovider.PieceParkReader, idxStor *indexstore.IndexStore) *CachedPieceReader { prCache := ttlcache.NewCache() _ = prCache.SetTTL(PieceReaderCacheTTL) prCache.SetCacheSizeLimit(MaxCachedReaders) @@ -59,6 +64,7 @@ func NewCachedPieceReader(db *harmonydb.DB, sectorReader *pieceprovider.SectorRe pieceParkReader: pieceParkReader, pieceReaderCache: prCache, pieceErrorCache: errorCache, + idxStor: idxStor, } expireCallback := func(key string, reason ttlcache.EvictionReason, value interface{}) { @@ -90,10 +96,10 @@ func NewCachedPieceReader(db *harmonydb.DB, sectorReader *pieceprovider.SectorRe } type cachedSectionReader struct { - reader storiface.Reader - cpr *CachedPieceReader - pieceCid cid.Cid - pieceSize abi.UnpaddedPieceSize + reader storiface.Reader + cpr *CachedPieceReader + pieceCid cid.Cid + rawSize uint64 // Signals when the underlying piece reader is ready ready chan struct{} // err is non-nil if there's an error getting the underlying piece reader @@ -125,22 +131,32 @@ func (r *cachedSectionReader) Close() error { return nil } -func (cpr *CachedPieceReader) getPieceReaderFromSector(ctx context.Context, pieceCid cid.Cid) (storiface.Reader, abi.UnpaddedPieceSize, error) { +func (cpr *CachedPieceReader) getPieceReaderFromSector(ctx context.Context, pieceCidV2 cid.Cid) (storiface.Reader, uint64, error) { // Get all deals containing this piece + commp, err := commcidv2.CommPFromPCidV2(pieceCidV2) + if err != nil { + return nil, 0, xerrors.Errorf("getting piece commitment from piece CID v2: %w", err) + } + + pieceCid := commp.PCidV1() + pieceSize := commp.PieceInfo().Size + var deals []struct { - SpID abi.ActorID `db:"sp_id"` - Sector abi.SectorNumber `db:"sector_num"` - Offset abi.PaddedPieceSize `db:"piece_offset"` - Length abi.PaddedPieceSize `db:"piece_length"` - Proof abi.RegisteredSealProof `db:"reg_seal_proof"` + SpID abi.ActorID `db:"sp_id"` + Sector abi.SectorNumber `db:"sector_num"` + Offset abi.PaddedPieceSize `db:"piece_offset"` + Length abi.PaddedPieceSize `db:"piece_length"` + RawSize int64 `db:"raw_size"` + Proof abi.RegisteredSealProof `db:"reg_seal_proof"` } - err := cpr.db.Select(ctx, &deals, `SELECT + err = cpr.db.Select(ctx, &deals, `SELECT mpd.sp_id, mpd.sector_num, mpd.piece_offset, mpd.piece_length, + mpd.raw_size, sm.reg_seal_proof FROM market_piece_deal mpd @@ -150,7 +166,7 @@ func (cpr *CachedPieceReader) getPieceReaderFromSector(ctx context.Context, piec mpd.sp_id = sm.sp_id AND mpd.sector_num = sm.sector_num WHERE - mpd.piece_cid = $1;`, pieceCid.String()) + mpd.piece_cid = $1 AND mpd.piece_length = $2`, pieceCid.String(), pieceSize) if err != nil { return nil, 0, fmt.Errorf("getting piece deals: %w", err) } @@ -177,29 +193,37 @@ func (cpr *CachedPieceReader) getPieceReaderFromSector(ctx context.Context, piec continue } - return reader, dl.Length.Unpadded(), nil + return reader, uint64(dl.RawSize), nil } return nil, 0, merr } -func (cpr *CachedPieceReader) getPieceReaderFromPiecePark(ctx context.Context, pieceCid cid.Cid) (storiface.Reader, abi.UnpaddedPieceSize, error) { +func (cpr *CachedPieceReader) getPieceReaderFromPiecePark(ctx context.Context, pieceCidV2 cid.Cid) (storiface.Reader, uint64, error) { + commp, err := commcidv2.CommPFromPCidV2(pieceCidV2) + if err != nil { + return nil, 0, xerrors.Errorf("getting piece commitment from piece CID v2: %w", err) + } + + pieceCid := commp.PCidV1() + pieceSize := commp.PieceInfo().Size + // Query parked_pieces and parked_piece_refs in one go var pieceData []struct { ID int64 `db:"id"` PieceRawSize int64 `db:"piece_raw_size"` } - err := cpr.db.Select(ctx, &pieceData, ` + err = cpr.db.Select(ctx, &pieceData, ` SELECT pp.id, pp.piece_raw_size FROM parked_pieces pp WHERE - pp.piece_cid = $1 AND pp.complete = TRUE AND pp.long_term = TRUE + pp.piece_cid = $1 AND pp.piece_padded_size = $2 AND pp.complete = TRUE AND pp.long_term = TRUE LIMIT 1; - `, pieceCid.String()) + `, pieceCid.String(), pieceSize) if err != nil { return nil, 0, fmt.Errorf("failed to query parked_pieces and parked_piece_refs for piece cid %s: %w", pieceCid.String(), err) } @@ -213,11 +237,76 @@ func (cpr *CachedPieceReader) getPieceReaderFromPiecePark(ctx context.Context, p return nil, 0, fmt.Errorf("failed to read piece from piece park: %w", err) } - return reader, abi.UnpaddedPieceSize(pieceData[0].PieceRawSize), nil + return reader, uint64(pieceData[0].PieceRawSize), nil +} + +type SubPieceReader struct { + sr *io.SectionReader + r io.Closer +} + +func (s SubPieceReader) Read(p []byte) (n int, err error) { + return s.sr.Read(p) +} + +func (s SubPieceReader) Close() error { + return s.r.Close() +} + +func (s SubPieceReader) Seek(offset int64, whence int) (int64, error) { + return s.sr.Seek(offset, whence) +} + +func (s SubPieceReader) ReadAt(p []byte, off int64) (n int, err error) { + return s.sr.ReadAt(p, off) +} + +func (cpr *CachedPieceReader) getPieceReaderFromAggregate(ctx context.Context, pieceCidV2 cid.Cid) (storiface.Reader, uint64, error) { + pieces, err := cpr.idxStor.FindPieceInAggregate(ctx, pieceCidV2) + if err != nil { + return nil, 0, fmt.Errorf("failed to find piece in aggregate: %w", err) + } + + if len(pieces) == 0 { + return nil, 0, fmt.Errorf("subpiece not found in any aggregate piece") + } + + pi, err := commcidv2.CommPFromPCidV2(pieceCidV2) + if err != nil { + return nil, 0, xerrors.Errorf("getting piece commitment from piece CID v2: %w", err) + } + + var merr error + + for _, p := range pieces { + reader, _, err := cpr.getPieceReaderFromPiecePark(ctx, p.Cid) + if err != nil { + log.Warnw("failed to get piece reader from piece park", "piececid", p.Cid.String(), "err", err) + reader, _, err = cpr.getPieceReaderFromSector(ctx, p.Cid) + if err != nil { + log.Errorw("failed to get piece reader from sector", "piececid", p.Cid.String(), "err", err) + merr = multierror.Append(merr, err) + continue + } + sr := io.NewSectionReader(reader, int64(p.Offset), int64(p.Size)) + return SubPieceReader{r: reader, sr: sr}, pi.PayloadSize(), nil + } + sr := io.NewSectionReader(reader, int64(p.Offset), int64(p.Size)) + return SubPieceReader{r: reader, sr: sr}, pi.PayloadSize(), nil + } + return nil, 0, fmt.Errorf("failed to find piece in aggregate: %w", merr) } -func (cpr *CachedPieceReader) GetSharedPieceReader(ctx context.Context, pieceCid cid.Cid) (storiface.Reader, abi.UnpaddedPieceSize, error) { - cacheKey := pieceCid.String() +func (cpr *CachedPieceReader) GetSharedPieceReader(ctx context.Context, pieceCidV2 cid.Cid) (storiface.Reader, uint64, error) { + cacheKey := pieceCidV2.String() + + commp, err := commcidv2.CommPFromPCidV2(pieceCidV2) + if err != nil { + return nil, 0, xerrors.Errorf("getting piece commitment from piece CID v2: %w", err) + } + + pieceCid := commp.PCidV1() + pieceSize := commp.PieceInfo().Size // First check if we have a cached error for this piece cpr.pieceErrorCacheMu.Lock() @@ -239,7 +328,7 @@ func (cpr *CachedPieceReader) GetSharedPieceReader(ctx context.Context, pieceCid // to the cache r = &cachedSectionReader{ cpr: cpr, - pieceCid: pieceCid, + pieceCid: pieceCidV2, ready: make(chan struct{}), refs: 1, } @@ -250,40 +339,43 @@ func (cpr *CachedPieceReader) GetSharedPieceReader(ctx context.Context, pieceCid readerCtx, readerCtxCancel := context.WithCancel(context.Background()) defer close(r.ready) - reader, size, err := cpr.getPieceReaderFromSector(readerCtx, pieceCid) + reader, size, err := cpr.getPieceReaderFromAggregate(readerCtx, pieceCidV2) if err != nil { - log.Warnw("failed to get piece reader from sector", "piececid", pieceCid, "err", err) + log.Debugw("failed to get piece reader from aggregate", "piececid", pieceCidV2.String(), "err", err) - serr := err + aerr := err - // Try getPieceReaderFromPiecePark - reader, size, err = cpr.getPieceReaderFromPiecePark(readerCtx, pieceCid) + reader, size, err = cpr.getPieceReaderFromSector(readerCtx, pieceCidV2) if err != nil { - log.Errorw("failed to get piece reader from piece park", "piececid", pieceCid, "err", err) - - finalErr := fmt.Errorf("failed to get piece reader from sector or piece park: %w, %w", err, serr) - - // Cache the error in the error cache - cpr.pieceErrorCacheMu.Lock() - _ = cpr.pieceErrorCache.Set(cacheKey, &cachedError{err: finalErr, pieceCid: pieceCid}) - cpr.pieceErrorCacheMu.Unlock() - - // Remove the failed reader from the main cache - cpr.pieceReaderCacheMu.Lock() - _ = cpr.pieceReaderCache.Remove(cacheKey) - cpr.pieceReaderCacheMu.Unlock() - - r.err = finalErr - readerCtxCancel() - - return nil, 0, finalErr + log.Debugw("failed to get piece reader from sector", "piececid", pieceCidV2.String(), "err", err) + serr := err + // Try getPieceReaderFromPiecePark + reader, size, err = cpr.getPieceReaderFromPiecePark(readerCtx, pieceCidV2) + if err != nil { + log.Errorw("failed to get piece reader from piece park", "piececid", pieceCid, "piece size", pieceSize, "err", err) + finalErr := fmt.Errorf("failed to get piece reader from aggregate, sector or piece park: %w, %w, %w", aerr, serr, err) + // Cache the error in the error cache + cpr.pieceErrorCacheMu.Lock() + _ = cpr.pieceErrorCache.Set(cacheKey, &cachedError{err: finalErr, pieceCid: pieceCid}) + cpr.pieceErrorCacheMu.Unlock() + + // Remove the failed reader from the main cache + cpr.pieceReaderCacheMu.Lock() + _ = cpr.pieceReaderCache.Remove(cacheKey) + cpr.pieceReaderCacheMu.Unlock() + + r.err = finalErr + readerCtxCancel() + + return nil, 0, finalErr + } } } r.reader = reader r.err = nil r.cancel = readerCtxCancel - r.pieceSize = size + r.rawSize = size } else { r = rr.(*cachedSectionReader) r.refs++ @@ -308,7 +400,7 @@ func (cpr *CachedPieceReader) GetSharedPieceReader(ctx context.Context, pieceCid return nil, 0, r.err } - rs := io.NewSectionReader(r.reader, 0, int64(r.pieceSize)) + rs := io.NewSectionReader(r.reader, 0, int64(r.rawSize)) return struct { io.Closer @@ -320,5 +412,5 @@ func (cpr *CachedPieceReader) GetSharedPieceReader(ctx context.Context, pieceCid Reader: rs, Seeker: rs, ReaderAt: r.reader, - }, r.pieceSize, nil + }, r.rawSize, nil } diff --git a/lib/commcidv2/commcidv2.go b/lib/commcidv2/commcidv2.go new file mode 100644 index 000000000..5000dbf9f --- /dev/null +++ b/lib/commcidv2/commcidv2.go @@ -0,0 +1,234 @@ +package commcidv2 + +import ( + "math/bits" + + "github.com/ipfs/go-cid" + pool "github.com/libp2p/go-buffer-pool" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/multiformats/go-varint" + "golang.org/x/xerrors" + + filabi "github.com/filecoin-project/go-state-types/abi" +) + +type CommP struct { + hashType int8 + treeHeight int8 + payloadPadding uint64 + digest []byte +} + +// hardcoded for npw +const ( + nodeSize = 32 + nodeLog2Size = 5 +) + +var mhMeta = map[int8]struct { + treeArity int8 + nodeLog2Size int8 + pCidV1Pref string + pCidV2Pref string +}{ + 1: { + treeArity: 2, + nodeLog2Size: nodeLog2Size, + pCidV1Pref: "\x01" + "\x81\xE2\x03" + "\x92\x20" + "\x20", // + 32 byte digest == total 39 byte cid + pCidV2Pref: "\x01" + "\x55" + "\x91\x20", // + mh varlen + varpad + int8 height + 32 byte digest == total AT LEAST 39 byte cid + }, +} + +func CommPFromPieceInfo(pi filabi.PieceInfo) (CommP, error) { + var cp CommP + if bits.OnesCount64(uint64(pi.Size)) > 1 { + return cp, xerrors.Errorf("malformed PieceInfo: .Size %d not a power of 2", pi.Size) + } + + // hardcoded until we get another commitment type + cp.hashType = 1 + ks := pi.PieceCID.KeyString() + cp.digest = []byte(ks[len(ks)-nodeSize:]) + + cp.treeHeight = 63 - int8(bits.LeadingZeros64(uint64(pi.Size))) - nodeLog2Size + + return cp, nil +} + +func CommPFromPCidV2(c cid.Cid) (CommP, error) { + var cp CommP + + dmh, err := multihash.Decode(c.Hash()) + if err != nil { + return cp, xerrors.Errorf("decoding cid: %w", err) + } + + // hardcoded for now at https://github.com/multiformats/multicodec/pull/331/files#diff-bf5b449ed8c1850371f42808a186b5c5089edd0025700505a6b8f426cd54a6e4R149 + if dmh.Code != 0x1011 { + return cp, xerrors.Errorf("unexpected multihash code %d", dmh.Code) + } + + p, n, err := varint.FromUvarint(dmh.Digest) + if err != nil { + return cp, xerrors.Errorf("decoding varint: %w", err) + } + + cp.hashType = 1 + cp.payloadPadding = p + cp.treeHeight = int8(dmh.Digest[n]) + cp.digest = dmh.Digest[n+1:] + + return cp, nil +} + +func NewSha2CommP(payloadSize uint64, digest []byte) (CommP, error) { + var cp CommP + + // hardcoded for now + if len(digest) != nodeSize { + return cp, xerrors.Errorf("digest size must be 32, got %d", len(digest)) + } + + psz := payloadSize + + // always 4 nodes long + if psz < 127 { + psz = 127 + } + + // fr32 expansion, count 127 blocks, rounded up + boxSize := ((psz + 126) / 127) * 128 + + // hardcoded for now + cp.hashType = 1 + cp.digest = digest + + cp.treeHeight = 63 - int8(bits.LeadingZeros64(boxSize)) - nodeLog2Size + if bits.OnesCount64(boxSize) != 1 { + cp.treeHeight++ + } + cp.payloadPadding = ((1 << (cp.treeHeight - 2)) * 127) - payloadSize + + return cp, nil +} + +func (cp *CommP) PayloadSize() uint64 { + return (1<<(cp.treeHeight-2))*127 - cp.payloadPadding +} + +func (cp *CommP) PieceLog2Size() int8 { + return cp.treeHeight + nodeLog2Size +} + +func (cp *CommP) PieceInfo() filabi.PieceInfo { + return filabi.PieceInfo{ + Size: filabi.PaddedPieceSize(1 << (cp.treeHeight + nodeLog2Size)), + PieceCID: cp.PCidV1(), // for now it won't understand anything else but V1... I think + } +} + +func (cp *CommP) PCidV1() cid.Cid { + pref := mhMeta[cp.hashType].pCidV1Pref + buf := pool.Get(len(pref) + len(cp.digest)) + copy(buf, pref) + copy(buf[len(pref):], cp.digest) + c, err := cid.Cast(buf) + pool.Put(buf) + if err != nil { + panic(err) + } + return c +} + +func (cp *CommP) PCidV2() cid.Cid { + pref := mhMeta[cp.hashType].pCidV2Pref + + ps := varint.UvarintSize(cp.payloadPadding) + + buf := pool.Get(len(pref) + + 1 + // size of the entire mh "payload" won't exceed 127 bytes + ps + + 1 + // the height is an int8 + nodeSize, // digest size, hardcoded for now + ) + + n := copy(buf, pref) + buf[n] = byte(ps + 1 + nodeSize) + n++ + + n += varint.PutUvarint(buf[n:], cp.payloadPadding) + + buf[n] = byte(cp.treeHeight) + n++ + + copy(buf[n:], cp.digest) + + c, err := cid.Cast(buf) + + pool.Put(buf) + if err != nil { + panic(err) + } + + return c +} + +func (cp *CommP) Digest() []byte { return cp.digest } + +func IsPieceCidV2(c cid.Cid) bool { + if c.Type() != uint64(multicodec.Raw) { + return false + } + + decoded, err := multihash.Decode(c.Hash()) + if err != nil { + return false + } + + if decoded.Code != uint64(multicodec.Fr32Sha256Trunc254Padbintree) { + return false + } + + if len(decoded.Digest) < 34 { + return false + } + + return true +} +func PieceCidV2FromV1(v1PieceCid cid.Cid, payloadsize uint64) (cid.Cid, error) { + decoded, err := multihash.Decode(v1PieceCid.Hash()) + if err != nil { + return cid.Undef, xerrors.Errorf("Error decoding data commitment hash: %w", err) + } + + filCodec := multicodec.Code(v1PieceCid.Type()) + filMh := multicodec.Code(decoded.Code) + + switch filCodec { + case multicodec.FilCommitmentUnsealed: + if filMh != multicodec.Sha2_256Trunc254Padded { + return cid.Undef, xerrors.Errorf("unexpected hash: %d", filMh) + } + case multicodec.FilCommitmentSealed: + if filMh != multicodec.PoseidonBls12_381A2Fc1 { + return cid.Undef, xerrors.Errorf("unexpected hash: %d", filMh) + } + default: // neither of the codecs above: we are not in Fil teritory + return cid.Undef, xerrors.Errorf("unexpected codec: %d", filCodec) + } + + if len(decoded.Digest) != 32 { + return cid.Undef, xerrors.Errorf("commitments must be 32 bytes long") + } + if filCodec != multicodec.FilCommitmentUnsealed { + return cid.Undef, xerrors.Errorf("unexpected codec: %d", filCodec) + } + + c, err := NewSha2CommP(payloadsize, decoded.Digest) + if err != nil { + return cid.Undef, xerrors.Errorf("error creating CommP: %w", err) + } + + return c.PCidV2(), nil +} diff --git a/lib/commcidv2/commcidv2_test.go b/lib/commcidv2/commcidv2_test.go new file mode 100644 index 000000000..b7e99e82e --- /dev/null +++ b/lib/commcidv2/commcidv2_test.go @@ -0,0 +1,409 @@ +package commcidv2 + +import ( + "fmt" + "testing" + + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test constants that should match our TypeScript implementation +func TestConstants(t *testing.T) { + // These constants are string-based identifiers in Go, not numeric constants + // We'll verify they exist and have the expected string values + assert.NotEqual(t, multicodec.Code(0), multicodec.FilCommitmentUnsealed) + assert.NotEqual(t, multicodec.Code(0), multicodec.FilCommitmentSealed) + assert.NotEqual(t, multicodec.Code(0), multicodec.Sha2_256Trunc254Padded) + assert.NotEqual(t, multicodec.Code(0), multicodec.PoseidonBls12_381A2Fc1) + + // Raw codec constant + assert.Equal(t, multicodec.Raw, multicodec.Code(0x55)) + + // Verify the constants exist and are not zero + // Note: These are numeric constants in Go, not string identifiers + assert.True(t, uint64(multicodec.FilCommitmentUnsealed) > 0) + assert.True(t, uint64(multicodec.FilCommitmentSealed) > 0) + assert.True(t, uint64(multicodec.Sha2_256Trunc254Padded) > 0) + assert.True(t, uint64(multicodec.PoseidonBls12_381A2Fc1) > 0) +} + +// Test PieceCidV2FromV1 with wrong hash type (demonstrates validation) +func TestPieceCidV2FromV1_WrongHashType(t *testing.T) { + // Create a valid unsealed commitment CID v1 + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i) + } + + // Create multihash with SHA2_256 (standard hash function) + mh, err := multihash.Encode(digest, uint64(multicodec.Sha2_256)) + require.NoError(t, err) + + // Create CID v1 with FilCommitmentUnsealed codec + // Note: We'll use SHA2_256 instead of SHA2_256Trunc254Padded for testing + cidV1, err := cid.V1Builder{ + Codec: uint64(multicodec.FilCommitmentUnsealed), + MhType: uint64(multicodec.Sha2_256), + MhLength: -1, + }.Sum(mh) + require.NoError(t, err) + + // Test conversion - should fail because we're using SHA2_256 instead of SHA2_256Trunc254Padded + payloadSize := uint64(1024) + cidV2, err := PieceCidV2FromV1(cidV1, payloadSize) + assert.Error(t, err) + assert.Equal(t, cid.Undef, cidV2) + assert.Contains(t, err.Error(), "unexpected hash") + + // This test demonstrates that our TypeScript implementation should also validate hash types + // and reject CIDs with incorrect hash functions +} + +// Test PieceCidV2FromV1 with valid sealed commitment +func TestPieceCidV2FromV1_ValidSealed(t *testing.T) { + // Create a valid sealed commitment CID v1 + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i + 1) + } + + // Create multihash with SHA2_256 (standard hash function) + mh, err := multihash.Encode(digest, uint64(multicodec.Sha2_256)) + require.NoError(t, err) + + // Create CID v1 with FilCommitmentSealed codec + // Note: We'll use SHA2_256 instead of PoseidonBls12_381A2Fc1 for testing + cidV1, err := cid.V1Builder{ + Codec: uint64(multicodec.FilCommitmentSealed), + MhType: uint64(multicodec.Sha2_256), + MhLength: -1, + }.Sum(mh) + require.NoError(t, err) + + // Test conversion - should fail because we're using wrong hash type + payloadSize := uint64(1024) + cidV2, err := PieceCidV2FromV1(cidV1, payloadSize) + assert.Error(t, err) + assert.Equal(t, cid.Undef, cidV2) + assert.Contains(t, err.Error(), "unexpected hash") +} + +// Test PieceCidV2FromV1 with invalid codec +func TestPieceCidV2FromV1_InvalidCodec(t *testing.T) { + // Create a CID v1 with raw codec (not Filecoin) + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i + 2) + } + + mh, err := multihash.Encode(digest, uint64(multicodec.Sha2_256)) + require.NoError(t, err) + + cidV1, err := cid.V1Builder{ + Codec: uint64(multicodec.Raw), + MhType: uint64(multicodec.Sha2_256), + MhLength: -1, + }.Sum(mh) + require.NoError(t, err) + + // Test conversion - should fail with unexpected codec + payloadSize := uint64(1024) + cidV2, err := PieceCidV2FromV1(cidV1, payloadSize) + assert.Error(t, err) + assert.Equal(t, cid.Undef, cidV2) + assert.Contains(t, err.Error(), "unexpected codec") +} + +// Test PieceCidV2FromV1 with invalid hash type +func TestPieceCidV2FromV1_InvalidHashType(t *testing.T) { + // Create a CID v1 with unsealed codec but wrong hash type + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i + 3) + } + + // Use SHA2_256 instead of SHA2_256Trunc254Padded + mh, err := multihash.Encode(digest, uint64(multicodec.Sha2_256)) + require.NoError(t, err) + + cidV1, err := cid.V1Builder{ + Codec: uint64(multicodec.FilCommitmentUnsealed), + MhType: uint64(multicodec.Sha2_256), + MhLength: -1, + }.Sum(mh) + require.NoError(t, err) + + // Test conversion - should fail with unexpected hash + payloadSize := uint64(1024) + cidV2, err := PieceCidV2FromV1(cidV1, payloadSize) + assert.Error(t, err) + assert.Equal(t, cid.Undef, cidV2) + assert.Contains(t, err.Error(), "unexpected hash") +} + +// Test PieceCidV2FromV1 with invalid digest length +func TestPieceCidV2FromV1_InvalidDigestLength(t *testing.T) { + // Create a CID v1 with unsealed codec but wrong digest length + digest := make([]byte, 16) // Only 16 bytes instead of 32 + for i := range digest { + digest[i] = byte(i + 4) + } + + mh, err := multihash.Encode(digest, uint64(multicodec.Sha2_256)) + require.NoError(t, err) + + cidV1, err := cid.V1Builder{ + Codec: uint64(multicodec.FilCommitmentUnsealed), + MhType: uint64(multicodec.Sha2_256), + MhLength: -1, + }.Sum(mh) + require.NoError(t, err) + + // Test conversion - should fail with hash type error (not digest length) + payloadSize := uint64(1024) + cidV2, err := PieceCidV2FromV1(cidV1, payloadSize) + assert.Error(t, err) + assert.Equal(t, cid.Undef, cidV2) + assert.Contains(t, err.Error(), "unexpected hash") +} + +// Test PieceCidV2FromV1 with different payload sizes +func TestPieceCidV2FromV1_DifferentPayloadSizes(t *testing.T) { + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i + 5) + } + + mh, err := multihash.Encode(digest, uint64(multicodec.Sha2_256)) + require.NoError(t, err) + + cidV1, err := cid.V1Builder{ + Codec: uint64(multicodec.FilCommitmentUnsealed), + MhType: uint64(multicodec.Sha2_256), + MhLength: -1, + }.Sum(mh) + require.NoError(t, err) + + // Test with different payload sizes - should fail with hash type error + testSizes := []uint64{1, 127, 128, 1024, 2048, 4096, 8192} + + for _, size := range testSizes { + t.Run(fmt.Sprintf("PayloadSize_%d", size), func(t *testing.T) { + cidV2, err := PieceCidV2FromV1(cidV1, size) + assert.Error(t, err) + assert.Equal(t, cid.Undef, cidV2) + assert.Contains(t, err.Error(), "unexpected hash") + }) + } +} + +// Test NewSha2CommP with valid inputs +func TestNewSha2CommP_Valid(t *testing.T) { + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i + 6) + } + + payloadSize := uint64(1024) + commP, err := NewSha2CommP(payloadSize, digest) + require.NoError(t, err) + + // Verify the CommP structure + assert.Equal(t, int8(1), commP.hashType) + assert.Equal(t, digest, commP.digest) + assert.True(t, commP.treeHeight > 0) + + // Verify payload size calculation + computedSize := commP.PayloadSize() + assert.Equal(t, payloadSize, computedSize) +} + +// Test NewSha2CommP with invalid digest length +func TestNewSha2CommP_InvalidDigestLength(t *testing.T) { + // Test with digest that's too short + digest := make([]byte, 16) + payloadSize := uint64(1024) + + commP, err := NewSha2CommP(payloadSize, digest) + assert.Error(t, err) + assert.Equal(t, CommP{}, commP) + assert.Contains(t, err.Error(), "digest size must be 32") +} + +// Test NewSha2CommP with digest that's too long +func TestNewSha2CommP_InvalidDigestLengthTooLong(t *testing.T) { + // Test with digest that's too long + digest := make([]byte, 64) + payloadSize := uint64(1024) + + commP, err := NewSha2CommP(payloadSize, digest) + assert.Error(t, err) + assert.Equal(t, CommP{}, commP) + assert.Contains(t, err.Error(), "digest size must be 32") +} + +// Test CommP methods +func TestCommP_Methods(t *testing.T) { + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i + 7) + } + + payloadSize := uint64(1024) + commP, err := NewSha2CommP(payloadSize, digest) + require.NoError(t, err) + + // Test Digest method + assert.Equal(t, digest, commP.Digest()) + + // Test PieceLog2Size method + log2Size := commP.PieceLog2Size() + assert.True(t, log2Size > 0) + + // Test PieceInfo method + pieceInfo := commP.PieceInfo() + // Note: The actual size may be different due to padding and alignment + assert.True(t, uint64(pieceInfo.Size) >= payloadSize) + assert.NotEqual(t, cid.Undef, pieceInfo.PieceCID) + + // Test PCidV1 method + cidV1 := commP.PCidV1() + assert.NotEqual(t, cid.Undef, cidV1) + assert.Equal(t, uint64(multicodec.FilCommitmentUnsealed), cidV1.Type()) + + // Test PCidV2 method + cidV2 := commP.PCidV2() + assert.NotEqual(t, cid.Undef, cidV2) + assert.Equal(t, uint64(multicodec.Raw), cidV2.Type()) + assert.True(t, IsPieceCidV2(cidV2)) +} + +// Test IsPieceCidV2 with valid piece CID v2 +func TestIsPieceCidV2_Valid(t *testing.T) { + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i + 8) + } + + payloadSize := uint64(1024) + commP, err := NewSha2CommP(payloadSize, digest) + require.NoError(t, err) + + cidV2 := commP.PCidV2() + assert.True(t, IsPieceCidV2(cidV2)) +} + +// Test IsPieceCidV2 with invalid CIDs +func TestIsPieceCidV2_Invalid(t *testing.T) { + // Test with raw CID (not piece CID v2) + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i + 9) + } + + mh, err := multihash.Encode(digest, uint64(multicodec.Sha2_256)) + require.NoError(t, err) + + rawCid, err := cid.V1Builder{ + Codec: uint64(multicodec.Raw), + MhType: uint64(multicodec.Sha2_256), + MhLength: -1, + }.Sum(mh) + require.NoError(t, err) + + assert.False(t, IsPieceCidV2(rawCid)) + + // Test with unsealed commitment CID (not piece CID v2) + unsealedCid, err := cid.V1Builder{ + Codec: uint64(multicodec.FilCommitmentUnsealed), + MhType: uint64(multicodec.Sha2_256), + MhLength: -1, + }.Sum(mh) + require.NoError(t, err) + + assert.False(t, IsPieceCidV2(unsealedCid)) +} + +// Test edge cases and boundary conditions +func TestEdgeCases(t *testing.T) { + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i + 10) + } + + // Test with minimum payload size + commP, err := NewSha2CommP(1, digest) + require.NoError(t, err) + assert.True(t, commP.PayloadSize() >= 1) + + // Test with very large payload size + largeSize := uint64(1 << 30) // 1GB + commP, err = NewSha2CommP(largeSize, digest) + require.NoError(t, err) + assert.Equal(t, largeSize, commP.PayloadSize()) + + // Test with power of 2 payload sizes + powerOf2Sizes := []uint64{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024} + for _, size := range powerOf2Sizes { + t.Run(fmt.Sprintf("PowerOf2_%d", size), func(t *testing.T) { + commP, err := NewSha2CommP(size, digest) + require.NoError(t, err) + assert.Equal(t, size, commP.PayloadSize()) + }) + } +} + +// Benchmark tests for performance +func BenchmarkPieceCidV2FromV1(b *testing.B) { + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i) + } + + mh, err := multihash.Encode(digest, uint64(multicodec.Sha2_256)) + if err != nil { + b.Fatal(err) + } + + cidV1, err := cid.V1Builder{ + Codec: uint64(multicodec.FilCommitmentUnsealed), + MhType: uint64(multicodec.Sha2_256), + MhLength: -1, + }.Sum(mh) + if err != nil { + b.Fatal(err) + } + + payloadSize := uint64(1024) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := PieceCidV2FromV1(cidV1, payloadSize) + // This will always fail due to wrong hash type, but we're benchmarking the function + // In real usage, you'd use the correct hash type + if err == nil { + b.Fatal("Expected error due to wrong hash type") + } + } +} + +func BenchmarkNewSha2CommP(b *testing.B) { + digest := make([]byte, 32) + for i := range digest { + digest[i] = byte(i) + } + + payloadSize := uint64(1024) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := NewSha2CommP(payloadSize, digest) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/lib/ffi/piece_funcs.go b/lib/ffi/piece_funcs.go index cd8b3ab20..2747674ac 100644 --- a/lib/ffi/piece_funcs.go +++ b/lib/ffi/piece_funcs.go @@ -8,8 +8,12 @@ import ( "golang.org/x/xerrors" + commcid "github.com/filecoin-project/go-fil-commcid" + commp "github.com/filecoin-project/go-fil-commp-hashhash" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/curio/harmony/harmonytask" - storiface "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/lib/storiface" ) func (sb *SealCalls) WritePiece(ctx context.Context, taskID *harmonytask.TaskID, pieceID storiface.PieceNumber, size int64, data io.Reader, storageType storiface.PathType) error { @@ -83,3 +87,82 @@ func (sb *SealCalls) PieceReader(ctx context.Context, id storiface.PieceNumber) func (sb *SealCalls) RemovePiece(ctx context.Context, id storiface.PieceNumber) error { return sb.Sectors.storage.Remove(ctx, id.Ref().ID, storiface.FTPiece, true, nil) } + +func (sb *SealCalls) WriteUploadPiece(ctx context.Context, pieceID storiface.PieceNumber, size int64, data io.Reader, storageType storiface.PathType, verifySize bool) (abi.PieceInfo, uint64, error) { + // Use storageType in AcquireSector + paths, pathIDs, done, err := sb.Sectors.AcquireSector(ctx, nil, pieceID.Ref(), storiface.FTNone, storiface.FTPiece, storageType) + if err != nil { + return abi.PieceInfo{}, 0, err + } + skipDeclare := storiface.FTPiece + + defer func() { + done(skipDeclare) + }() + + dest := paths.Piece + tempDest := dest + storiface.TempSuffix + + destFile, err := os.OpenFile(tempDest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return abi.PieceInfo{}, 0, xerrors.Errorf("creating temp piece file '%s': %w", tempDest, err) + } + + removeTemp := true + defer func() { + if removeTemp { + rerr := os.Remove(tempDest) + if rerr != nil { + log.Errorf("removing temp file: %+v", rerr) + } + } + }() + + copyStart := time.Now() + + wr := new(commp.Calc) + writers := io.MultiWriter(wr, destFile) + + n, err := io.CopyBuffer(writers, io.LimitReader(data, size), make([]byte, 8<<20)) + if err != nil { + _ = destFile.Close() + return abi.PieceInfo{}, 0, xerrors.Errorf("copying piece data: %w", err) + } + + if err := destFile.Close(); err != nil { + return abi.PieceInfo{}, 0, xerrors.Errorf("closing temp piece file: %w", err) + } + + if verifySize && n != size { + return abi.PieceInfo{}, 0, xerrors.Errorf("short write: %d", n) + } + + digest, pieceSize, err := wr.Digest() + if err != nil { + return abi.PieceInfo{}, 0, xerrors.Errorf("computing piece digest: %w", err) + } + + pcid, err := commcid.DataCommitmentV1ToCID(digest) + if err != nil { + return abi.PieceInfo{}, 0, xerrors.Errorf("computing piece CID: %w", err) + } + psize := abi.PaddedPieceSize(pieceSize) + + copyEnd := time.Now() + + log.Infow("wrote piece", "piece", pieceID, "size", n, "duration", copyEnd.Sub(copyStart), "dest", dest, "MiB/s", float64(size)/(1<<20)/copyEnd.Sub(copyStart).Seconds()) + + if err := os.Rename(tempDest, dest); err != nil { + return abi.PieceInfo{}, 0, xerrors.Errorf("rename temp piece to dest %s -> %s: %w", tempDest, dest, err) + } + + skipDeclare = storiface.FTNone + + removeTemp = false + + if err := sb.ensureOneCopy(ctx, pieceID.Ref().ID, pathIDs, storiface.FTPiece); err != nil { + return abi.PieceInfo{}, 0, xerrors.Errorf("ensure one copy: %w", err) + } + + return abi.PieceInfo{PieceCID: pcid, Size: psize}, uint64(n), nil +} diff --git a/lib/paths/http_handler.go b/lib/paths/http_handler.go index 5a7e91e67..020afb8fd 100644 --- a/lib/paths/http_handler.go +++ b/lib/paths/http_handler.go @@ -65,6 +65,7 @@ func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE") + mux.HandleFunc("/remote/stash/{id}", handler.remoteGetSector).Methods("POST") mux.ServeHTTP(w, r) } diff --git a/lib/paths/index.go b/lib/paths/index.go index 35e667fe1..e2d0b2651 100644 --- a/lib/paths/index.go +++ b/lib/paths/index.go @@ -9,7 +9,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - storiface "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" ) diff --git a/lib/paths/local.go b/lib/paths/local.go index 6c80893c9..fe437f5ae 100644 --- a/lib/paths/local.go +++ b/lib/paths/local.go @@ -343,7 +343,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { return xerrors.Errorf("declaring storage in index: %w", err) } - if err := st.declareSectors(ctx, p, meta.ID, meta.CanStore, false); err != nil { + if err := st.declareSectors(ctx, p, meta.ID, meta.CanStore, true); err != nil { return err } @@ -391,11 +391,28 @@ func (st *Local) open(ctx context.Context) error { go st.reportHealth(ctx) + go st.startPeriodicRedeclare(ctx) + return nil } var declareCounter atomic.Int32 +func (st *Local) startPeriodicRedeclare(ctx context.Context) { + ticker := time.NewTicker(time.Hour * 4) + defer ticker.Stop() + for { + select { + case <-ticker.C: + if err := st.Redeclare(ctx, nil, true); err != nil { + log.Errorf("redeclaring storage: %w", err) + } + case <-ctx.Done(): + return + } + } +} + func (st *Local) Redeclare(ctx context.Context, filterId *storiface.ID, dropMissingDecls bool) error { st.localLk.Lock() defer st.localLk.Unlock() diff --git a/lib/proof/merkle_sha254_memtree.go b/lib/proof/merkle_sha254_memtree.go index fec3daecd..009d10159 100644 --- a/lib/proof/merkle_sha254_memtree.go +++ b/lib/proof/merkle_sha254_memtree.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/fr32" ) -const MaxMemtreeSize = 256 << 20 +const MaxMemtreeSize = 1 << 30 // BuildSha254Memtree builds a sha256 memtree from the input data // Returned slice should be released to the pool after use @@ -72,3 +72,46 @@ func ComputeBinShaParent(left, right [NODE_SIZE]byte) [NODE_SIZE]byte { out[NODE_SIZE-1] &= 0x3F return out } + +func BuildSha254MemtreeFromSnapshot(data []byte) ([]byte, error) { + size := abi.PaddedPieceSize(len(data)) + if size > MaxMemtreeSize { + return nil, xerrors.Errorf("piece too large for memtree: %d", size) + } + + nLeaves := int64(size) / NODE_SIZE + totalNodes, levelSizes := computeTotalNodes(nLeaves, 2) + memtreeBuf := pool.Get(int(totalNodes * NODE_SIZE)) + + copy(memtreeBuf[:len(data)], data) + + d := sha256.New() + + levelStarts := make([]int64, len(levelSizes)) + levelStarts[0] = 0 + for i := 1; i < len(levelSizes); i++ { + levelStarts[i] = levelStarts[i-1] + levelSizes[i-1]*NODE_SIZE + } + + for level := 1; level < len(levelSizes); level++ { + levelNodes := levelSizes[level] + prevLevelStart := levelStarts[level-1] + currLevelStart := levelStarts[level] + + for i := int64(0); i < levelNodes; i++ { + leftOffset := prevLevelStart + (2*i)*NODE_SIZE + + d.Reset() + d.Write(memtreeBuf[leftOffset : leftOffset+(NODE_SIZE*2)]) + + outOffset := currLevelStart + i*NODE_SIZE + // sum calls append, so we give it a zero len slice at the correct offset + d.Sum(memtreeBuf[outOffset:outOffset]) + + // set top bits to 00 + memtreeBuf[outOffset+NODE_SIZE-1] &= 0x3F + } + } + + return memtreeBuf, nil +} diff --git a/lib/testutils/testutils.go b/lib/testutils/testutils.go index 2203bc245..49b676bd4 100644 --- a/lib/testutils/testutils.go +++ b/lib/testutils/testutils.go @@ -2,10 +2,13 @@ package testutils import ( "context" + "crypto/rand" "fmt" "io" - "math/rand" + "math/bits" "os" + "strings" + "time" "github.com/ipfs/boxo/blockservice" bstore "github.com/ipfs/boxo/blockstore" @@ -23,12 +26,20 @@ import ( carv2 "github.com/ipld/go-car/v2" "github.com/ipld/go-car/v2/blockstore" "github.com/multiformats/go-multihash" + "github.com/oklog/ulid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-data-segment/datasegment" + commp "github.com/filecoin-project/go-fil-commp-hashhash" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/lib/commcidv2" ) const defaultHashFunction = uint64(multihash.BLAKE2B_MIN + 31) -func CreateRandomFile(dir string, rseed int64, size int64) (string, error) { - source := io.LimitReader(rand.New(rand.NewSource(rseed)), size) +func CreateRandomTmpFile(dir string, size int64) (string, error) { + source := io.LimitReader(rand.Reader, size) file, err := os.CreateTemp(dir, "sourcefile.dat") if err != nil { @@ -164,3 +175,122 @@ func WriteUnixfsDAGTo(path string, into ipldformat.DAGService, chunksize int64, return nd.Cid(), nil } + +func CreateAggregateFromCars(files []string, dealSize abi.PaddedPieceSize, aggregateOut bool) (cid.Cid, error) { + var lines []string + var readers []io.Reader + var deals []abi.PieceInfo + + for _, f := range files { + file, err := os.Open(f) + if err != nil { + return cid.Undef, xerrors.Errorf("opening subpiece file: %w", err) + } + stat, err := file.Stat() + if err != nil { + return cid.Undef, xerrors.Errorf("getting file stat: %w", err) + } + cp := new(commp.Calc) + _, err = io.Copy(cp, file) + if err != nil { + return cid.Undef, xerrors.Errorf("copying subpiece to commp writer: %w", err) + } + _, err = file.Seek(0, io.SeekStart) + if err != nil { + return cid.Undef, xerrors.Errorf("seeking to start of file: %w", err) + } + pbytes, size, err := cp.Digest() + if err != nil { + return cid.Undef, xerrors.Errorf("computing digest for subpiece: %w", err) + } + comm, err := commcidv2.NewSha2CommP(uint64(stat.Size()), pbytes) + if err != nil { + return cid.Undef, xerrors.Errorf("converting data commitment to CID: %w", err) + } + deals = append(deals, abi.PieceInfo{ + PieceCID: comm.PCidV1(), + Size: abi.PaddedPieceSize(size), + }) + readers = append(readers, file) + urlStr := fmt.Sprintf("http://piece-server:12320/pieces?id=%s", stat.Name()) + lines = append(lines, fmt.Sprintf("%s\t%s", comm.PCidV2().String(), urlStr)) + } + + _, upsize, err := datasegment.ComputeDealPlacement(deals) + if err != nil { + return cid.Undef, xerrors.Errorf("computing deal placement: %w", err) + } + + next := 1 << (64 - bits.LeadingZeros64(upsize+256)) + + if abi.PaddedPieceSize(next) != dealSize { + return cid.Undef, fmt.Errorf("deal size mismatch: expected %d, got %d", dealSize, abi.PaddedPieceSize(next)) + } + + a, err := datasegment.NewAggregate(abi.PaddedPieceSize(next), deals) + if err != nil { + return cid.Undef, xerrors.Errorf("creating aggregate: %w", err) + } + out, err := a.AggregateObjectReader(readers) + if err != nil { + return cid.Undef, xerrors.Errorf("creating aggregate reader: %w", err) + } + + x, err := ulid.New(uint64(time.Now().UnixMilli()), rand.Reader) + if err != nil { + return cid.Undef, xerrors.Errorf("creating aggregate file: %w", err) + } + + f, err := os.OpenFile(x.String(), os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644) + if err != nil { + return cid.Undef, err + } + defer func() { + _ = f.Close() + }() + + cp := new(commp.Calc) + w := io.MultiWriter(cp, f) + + n, err := io.Copy(w, out) + if err != nil { + _ = f.Close() + return cid.Undef, xerrors.Errorf("writing aggregate: %w", err) + } + + _ = f.Close() + + digest, paddedPieceSize, err := cp.Digest() + if err != nil { + return cid.Undef, xerrors.Errorf("computing digest: %w", err) + } + if abi.PaddedPieceSize(paddedPieceSize) != dealSize { + return cid.Undef, fmt.Errorf("deal size mismatch after final commP: expected %d, got %d", dealSize, abi.PaddedPieceSize(paddedPieceSize)) + } + + if n != int64(dealSize.Unpadded()) { + return cid.Undef, fmt.Errorf("incorrect aggregate raw size: expected %d, got %d", dealSize.Unpadded(), n) + } + + comm, err := commcidv2.NewSha2CommP(uint64(n), digest) + if err != nil { + return cid.Undef, xerrors.Errorf("creating commP: %w", err) + } + + err = os.WriteFile(fmt.Sprintf("aggregate_%s", comm.PCidV2().String()), []byte(strings.Join(lines, "\n")), 0644) + if err != nil { + return cid.Undef, xerrors.Errorf("writing aggregate to file: %w", err) + } + + if !aggregateOut { + defer func() { + _ = os.Remove(f.Name()) + }() + } else { + defer func() { + _ = os.Rename(f.Name(), fmt.Sprintf("aggregate_%s.piece", comm.PCidV2().String())) + }() + } + + return comm.PCidV2(), nil +} diff --git a/market/http/http.go b/market/http/http.go index a9c4934bf..886cca1dd 100644 --- a/market/http/http.go +++ b/market/http/http.go @@ -1,32 +1,61 @@ package http import ( + "github.com/ethereum/go-ethereum/ethclient" "github.com/go-chi/chi/v5" "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/paths" mk12http "github.com/filecoin-project/curio/market/mk12/http" + mk20http "github.com/filecoin-project/curio/market/mk20/http" + "github.com/filecoin-project/curio/pdp" + "github.com/filecoin-project/curio/tasks/message" storage_market "github.com/filecoin-project/curio/tasks/storage-market" ) type MarketHandler struct { - mdh *mk12http.MK12DealHandler + mdh12 *mk12http.MK12DealHandler + mdh20 *mk20http.MK20DealHandler + pdpService *pdp.PDPService + domainName string } // NewMarketHandler is used to prepare all the required market handlers. Currently, it supports mk12 deal market. // This function should be used to expand the functionality under "/market" path -func NewMarketHandler(db *harmonydb.DB, cfg *config.CurioConfig, dm *storage_market.CurioStorageDealMarket) (*MarketHandler, error) { - mdh, err := mk12http.NewMK12DealHandler(db, cfg, dm) +func NewMarketHandler(db *harmonydb.DB, cfg *config.CurioConfig, dm *storage_market.CurioStorageDealMarket, eth *ethclient.Client, fc pdp.PDPServiceNodeApi, sn *message.SenderETH, stor paths.StashStore) (*MarketHandler, error) { + mdh12, err := mk12http.NewMK12DealHandler(db, cfg, dm) if err != nil { return nil, err } + + mdh20, err := mk20http.NewMK20DealHandler(db, cfg, dm) + if err != nil { + return nil, err + } + + var pdpService *pdp.PDPService + + if sn != nil { + pdpService = pdp.NewPDPService(db, stor, eth, fc, sn) + //pdp.Routes(r, pdsvc) + } + return &MarketHandler{ - mdh: mdh, + mdh12: mdh12, + mdh20: mdh20, + pdpService: pdpService, + domainName: cfg.HTTP.DomainName, }, nil } // Router is used to attach all the market handlers // This can include mk12 deals, mk20 deals(WIP), sector market(WIP) etc func Router(mux *chi.Mux, mh *MarketHandler) { - mux.Mount("/market/mk12", mk12http.Router(mh.mdh)) + mux.Mount("/market/mk12", mk12http.Router(mh.mdh12)) + mux.Mount("/market/mk20", mk20http.Router(mh.mdh20, mh.domainName)) + if mh.pdpService != nil { + mux.Mount("/market/pdp", pdp.Routes(mh.pdpService)) + } + // TODO: Attach a info endpoint here with details about supported market modules and services under them } diff --git a/market/indexstore/create.cql b/market/indexstore/cql/0001_create.cql similarity index 100% rename from market/indexstore/create.cql rename to market/indexstore/cql/0001_create.cql diff --git a/market/indexstore/cql/0002_piece_index.cql b/market/indexstore/cql/0002_piece_index.cql new file mode 100644 index 000000000..aa9f87a42 --- /dev/null +++ b/market/indexstore/cql/0002_piece_index.cql @@ -0,0 +1,13 @@ +CREATE TABLE IF NOT EXISTS PieceToAggregatePiece ( + AggregatePieceCid BLOB PRIMARY KEY, + PieceCid BLOB, + UnpaddedOffset BIGINT, + UnpaddedLength BIGINT +); + +CREATE TABLE IF NOT EXISTS PDPCacheLayer ( + PieceCid BLOB PRIMARY KEY, + LayerIndex INT, + Leaf BLOB, + LeafIndex BIGINT +); \ No newline at end of file diff --git a/market/indexstore/indexstore.go b/market/indexstore/indexstore.go index 88c52a303..36ec8154f 100644 --- a/market/indexstore/indexstore.go +++ b/market/indexstore/indexstore.go @@ -2,9 +2,12 @@ package indexstore import ( "context" - _ "embed" + "embed" "errors" "fmt" + "math/rand" + "sort" + "strconv" "strings" "time" @@ -20,8 +23,8 @@ import ( const keyspace = "curio" -//go:embed create.cql -var createCQL string +//go:embed cql/*.cql +var cqlFiles embed.FS var log = logging.Logger("indexstore") @@ -71,35 +74,46 @@ func isNotFoundErr(err error) bool { return strings.Contains(strings.ToLower(err.Error()), "not found") } -func NewIndexStore(hosts []string, port int, cfg *config.CurioConfig) (*IndexStore, error) { - if len(hosts) == 0 { - return nil, xerrors.Errorf("no hosts provided for cassandra") - } - +func NewIndexStore(hosts []string, port int, cfg *config.CurioConfig) *IndexStore { cluster := gocql.NewCluster(hosts...) cluster.Timeout = 5 * time.Minute cluster.Consistency = gocql.One cluster.NumConns = cfg.Market.StorageMarketConfig.Indexing.InsertConcurrency * 8 cluster.Port = port - store := &IndexStore{ + return &IndexStore{ cluster: cluster, settings: settings{ InsertBatchSize: cfg.Market.StorageMarketConfig.Indexing.InsertBatchSize, InsertConcurrency: cfg.Market.StorageMarketConfig.Indexing.InsertConcurrency, }, } +} - return store, store.Start(context.Background()) +type ITestID string + +// ItestNewID see ITestWithID doc +func ITestNewID() ITestID { + return ITestID(strconv.Itoa(rand.Intn(99999))) } -func (i *IndexStore) Start(ctx context.Context) error { +func (i *IndexStore) Start(ctx context.Context, test bool) error { + if len(i.cluster.Hosts) == 0 { + return xerrors.Errorf("no hosts provided for cassandra") + } + + keyspaceName := keyspace + if test { + id := ITestNewID() + keyspaceName = fmt.Sprintf("test%s", id) + } + // Create Cassandra keyspace session, err := i.cluster.CreateSession() if err != nil { return xerrors.Errorf("creating cassandra session: %w", err) } - query := `CREATE KEYSPACE IF NOT EXISTS ` + keyspace + + query := `CREATE KEYSPACE IF NOT EXISTS ` + keyspaceName + ` WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 }` err = session.Query(query).WithContext(ctx).Exec() if err != nil { @@ -109,22 +123,38 @@ func (i *IndexStore) Start(ctx context.Context) error { session.Close() // Recreate session with the keyspace - i.cluster.Keyspace = keyspace + i.cluster.Keyspace = keyspaceName session, err = i.cluster.CreateSession() if err != nil { return xerrors.Errorf("creating cassandra session: %w", err) } - lines := strings.Split(createCQL, ";") - for _, line := range lines { - line = strings.Trim(line, "\n \t") - if line == "" { + entries, err := cqlFiles.ReadDir("cql") + if err != nil { + log.Fatalf("failed to read embedded directory: %v", err) + } + + for _, entry := range entries { + if entry.IsDir() { continue } - log.Debug(line) - err := session.Query(line).WithContext(ctx).Exec() + + data, err := cqlFiles.ReadFile("cql/" + entry.Name()) if err != nil { - return xerrors.Errorf("creating tables: executing\n%s\n%w", line, err) + log.Fatalf("failed to read file %s: %v", entry.Name(), err) + } + + lines := strings.Split(string(data), ";") + for _, line := range lines { + line = strings.Trim(line, "\n \t") + if line == "" { + continue + } + log.Debug(line) + err := session.Query(line).WithContext(ctx).Exec() + if err != nil { + return xerrors.Errorf("creating tables: executing\n%s\n%w", line, err) + } } } @@ -134,11 +164,11 @@ func (i *IndexStore) Start(ctx context.Context) error { return nil } -// AddIndex adds multihash -> piece cid mappings, along with offset and size information for the piece. -func (i *IndexStore) AddIndex(ctx context.Context, pieceCid cid.Cid, recordsChan chan Record) error { +// AddIndex adds multihash -> piece cid (v2) mappings, along with offset and size information for the piece. +func (i *IndexStore) AddIndex(ctx context.Context, pieceCidv2 cid.Cid, recordsChan chan Record) error { insertPieceBlockOffsetSize := `INSERT INTO PieceBlockOffsetSize (PieceCid, PayloadMultihash, BlockOffset) VALUES (?, ?, ?)` insertPayloadToPieces := `INSERT INTO PayloadToPieces (PayloadMultihash, PieceCid, BlockSize) VALUES (?, ?, ?)` - pieceCidBytes := pieceCid.Bytes() + pieceCidBytes := pieceCidv2.Bytes() var eg errgroup.Group @@ -161,12 +191,12 @@ func (i *IndexStore) AddIndex(ctx context.Context, pieceCid cid.Cid, recordsChan if !ok { if len(batchPieceBlockOffsetSize.Entries) > 0 { - if err := i.executeBatchWithRetry(ctx, batchPieceBlockOffsetSize, pieceCid); err != nil { + if err := i.executeBatchWithRetry(ctx, batchPieceBlockOffsetSize, pieceCidv2); err != nil { return err } } if len(batchPayloadToPieces.Entries) > 0 { - if err := i.executeBatchWithRetry(ctx, batchPayloadToPieces, pieceCid); err != nil { + if err := i.executeBatchWithRetry(ctx, batchPayloadToPieces, pieceCidv2); err != nil { return err } } @@ -188,13 +218,13 @@ func (i *IndexStore) AddIndex(ctx context.Context, pieceCid cid.Cid, recordsChan }) if len(batchPieceBlockOffsetSize.Entries) == i.settings.InsertBatchSize { - if err := i.executeBatchWithRetry(ctx, batchPieceBlockOffsetSize, pieceCid); err != nil { + if err := i.executeBatchWithRetry(ctx, batchPieceBlockOffsetSize, pieceCidv2); err != nil { return err } batchPieceBlockOffsetSize = nil } if len(batchPayloadToPieces.Entries) == i.settings.InsertBatchSize { - if err := i.executeBatchWithRetry(ctx, batchPayloadToPieces, pieceCid); err != nil { + if err := i.executeBatchWithRetry(ctx, batchPayloadToPieces, pieceCidv2); err != nil { return err } batchPayloadToPieces = nil @@ -212,7 +242,7 @@ func (i *IndexStore) AddIndex(ctx context.Context, pieceCid cid.Cid, recordsChan } // executeBatchWithRetry executes a batch with retry logic and exponential backoff -func (i *IndexStore) executeBatchWithRetry(ctx context.Context, batch *gocql.Batch, pieceCid cid.Cid) error { +func (i *IndexStore) executeBatchWithRetry(ctx context.Context, batch *gocql.Batch, pieceCidv2 cid.Cid) error { var err error maxRetries := 20 backoff := 20 * time.Second @@ -236,11 +266,11 @@ func (i *IndexStore) executeBatchWithRetry(ctx context.Context, batch *gocql.Bat return ctx.Err() } - log.Warnf("Batch insert attempt %d failed for piece %s: %v", attempt+1, pieceCid, err) + log.Warnf("Batch insert attempt %d failed for piece %s: %v", attempt+1, pieceCidv2, err) // If max retries reached, return error if attempt == maxRetries { - return xerrors.Errorf("execute batch: executing batch insert for piece %s: %w", pieceCid, err) + return xerrors.Errorf("execute batch: executing batch insert for piece %s: %w", pieceCidv2, err) } // Sleep for backoff duration before retrying @@ -262,8 +292,8 @@ func (i *IndexStore) executeBatchWithRetry(ctx context.Context, batch *gocql.Bat // RemoveIndexes removes all multihash -> piece cid mappings, and all // offset information for the piece. -func (i *IndexStore) RemoveIndexes(ctx context.Context, pieceCid cid.Cid) error { - pieceCidBytes := pieceCid.Bytes() +func (i *IndexStore) RemoveIndexes(ctx context.Context, pieceCidv2 cid.Cid) error { + pieceCidBytes := pieceCidv2.Bytes() // First, select all PayloadMultihash for the given PieceCid from PieceBlockOffsetSize selectQry := `SELECT PayloadMultihash FROM PieceBlockOffsetSize WHERE PieceCid = ?` @@ -278,7 +308,7 @@ func (i *IndexStore) RemoveIndexes(ctx context.Context, pieceCid cid.Cid) error payloadMultihashes = append(payloadMultihashes, mhCopy) } if err := iter.Close(); err != nil { - return xerrors.Errorf("scanning PayloadMultihash for piece %s: %w", pieceCid, err) + return xerrors.Errorf("scanning PayloadMultihash for piece %s: %w", pieceCidv2, err) } // Prepare batch deletes for PayloadToPieces @@ -294,16 +324,16 @@ func (i *IndexStore) RemoveIndexes(ctx context.Context, pieceCid cid.Cid) error }) if len(batch.Entries) >= batchSize || idx == len(payloadMultihashes)-1 { - if err := i.executeBatchWithRetry(ctx, batch, pieceCid); err != nil { - return xerrors.Errorf("executing batch delete for PayloadToPieces for piece %s: %w", pieceCid, err) + if err := i.executeBatchWithRetry(ctx, batch, pieceCidv2); err != nil { + return xerrors.Errorf("executing batch delete for PayloadToPieces for piece %s: %w", pieceCidv2, err) } batch = i.session.NewBatch(gocql.UnloggedBatch).WithContext(ctx) } } if len(batch.Entries) >= 0 { - if err := i.executeBatchWithRetry(ctx, batch, pieceCid); err != nil { - return xerrors.Errorf("executing batch delete for PayloadToPieces for piece %s: %w", pieceCid, err) + if err := i.executeBatchWithRetry(ctx, batch, pieceCidv2); err != nil { + return xerrors.Errorf("executing batch delete for PayloadToPieces for piece %s: %w", pieceCidv2, err) } } @@ -311,16 +341,16 @@ func (i *IndexStore) RemoveIndexes(ctx context.Context, pieceCid cid.Cid) error delPieceBlockOffsetSizeQry := `DELETE FROM PieceBlockOffsetSize WHERE PieceCid = ?` err := i.session.Query(delPieceBlockOffsetSizeQry, pieceCidBytes).WithContext(ctx).Exec() if err != nil { - return xerrors.Errorf("deleting PieceBlockOffsetSize for piece %s: %w", pieceCid, err) + return xerrors.Errorf("deleting PieceBlockOffsetSize for piece %s: %w", pieceCidv2, err) } return nil } -// PieceInfo contains PieceCid and BlockSize +// PieceInfo contains PieceCidV2 and BlockSize type PieceInfo struct { - PieceCid cid.Cid - BlockSize uint64 + PieceCidV2 cid.Cid + BlockSize uint64 } // PiecesContainingMultihash gets all pieces that contain a multihash along with their BlockSize @@ -337,8 +367,8 @@ func (i *IndexStore) PiecesContainingMultihash(ctx context.Context, m multihash. return nil, fmt.Errorf("parsing piece cid: %w", err) } pieces = append(pieces, PieceInfo{ - PieceCid: pcid, - BlockSize: blockSize, + PieceCidV2: pcid, + BlockSize: blockSize, }) } if err := iter.Close(); err != nil { @@ -352,11 +382,11 @@ func (i *IndexStore) PiecesContainingMultihash(ctx context.Context, m multihash. return pieces, nil } -// GetOffset retrieves the offset of a payload in a piece -func (i *IndexStore) GetOffset(ctx context.Context, pieceCid cid.Cid, hash multihash.Multihash) (uint64, error) { +// GetOffset retrieves the offset of a payload in a piece(v2) +func (i *IndexStore) GetOffset(ctx context.Context, pieceCidv2 cid.Cid, hash multihash.Multihash) (uint64, error) { var offset uint64 qryOffset := `SELECT BlockOffset FROM PieceBlockOffsetSize WHERE PieceCid = ? AND PayloadMultihash = ?` - err := i.session.Query(qryOffset, pieceCid.Bytes(), []byte(hash)).WithContext(ctx).Scan(&offset) + err := i.session.Query(qryOffset, pieceCidv2.Bytes(), []byte(hash)).WithContext(ctx).Scan(&offset) if err != nil { return 0, fmt.Errorf("getting offset: %w", err) } @@ -364,9 +394,9 @@ func (i *IndexStore) GetOffset(ctx context.Context, pieceCid cid.Cid, hash multi return offset, nil } -func (i *IndexStore) GetPieceHashRange(ctx context.Context, piece cid.Cid, start multihash.Multihash, num int64) ([]multihash.Multihash, error) { +func (i *IndexStore) GetPieceHashRange(ctx context.Context, piecev2 cid.Cid, start multihash.Multihash, num int64) ([]multihash.Multihash, error) { qry := "SELECT PayloadMultihash FROM PieceBlockOffsetSize WHERE PieceCid = ? AND PayloadMultihash >= ? ORDER BY PayloadMultihash ASC LIMIT ?" - iter := i.session.Query(qry, piece.Bytes(), []byte(start), num).WithContext(ctx).Iter() + iter := i.session.Query(qry, piecev2.Bytes(), []byte(start), num).WithContext(ctx).Iter() var hashes []multihash.Multihash var r []byte @@ -378,7 +408,7 @@ func (i *IndexStore) GetPieceHashRange(ctx context.Context, piece cid.Cid, start r = make([]byte, 0, 36) } if err := iter.Close(); err != nil { - return nil, xerrors.Errorf("iterating piece hash range (P:0x%02x, H:0x%02x, n:%d): %w", piece.Bytes(), []byte(start), num, err) + return nil, xerrors.Errorf("iterating piece hash range (P:0x%02x, H:0x%02x, n:%d): %w", piecev2.Bytes(), []byte(start), num, err) } if len(hashes) != int(num) { return nil, xerrors.Errorf("expected %d hashes, got %d (possibly missing indexes)", num, len(hashes)) @@ -387,9 +417,9 @@ func (i *IndexStore) GetPieceHashRange(ctx context.Context, piece cid.Cid, start return hashes, nil } -func (i *IndexStore) CheckHasPiece(ctx context.Context, piece cid.Cid) (bool, error) { +func (i *IndexStore) CheckHasPiece(ctx context.Context, piecev2 cid.Cid) (bool, error) { qry := "SELECT PayloadMultihash FROM PieceBlockOffsetSize WHERE PieceCid = ? AND PayloadMultihash >= ? ORDER BY PayloadMultihash ASC LIMIT ?" - iter := i.session.Query(qry, piece.Bytes(), []byte{0}, 1).WithContext(ctx).Iter() + iter := i.session.Query(qry, piecev2.Bytes(), []byte{0}, 1).WithContext(ctx).Iter() var hashes []multihash.Multihash var r []byte @@ -401,8 +431,285 @@ func (i *IndexStore) CheckHasPiece(ctx context.Context, piece cid.Cid) (bool, er r = make([]byte, 0, 36) } if err := iter.Close(); err != nil { - return false, xerrors.Errorf("iterating piece hash range (P:0x%02x, n:%d): %w", piece.Bytes(), len(hashes), err) + return false, xerrors.Errorf("iterating piece hash range (P:0x%02x, n:%d): %w", piecev2.Bytes(), len(hashes), err) + } + + return len(hashes) > 0, nil +} + +func (i *IndexStore) InsertAggregateIndex(ctx context.Context, aggregatePieceCid cid.Cid, records []Record) error { + insertAggregateIndex := `INSERT INTO PieceToAggregatePiece (PieceCid, AggregatePieceCid, UnpaddedOffset, UnpaddedLength) VALUES (?, ?, ?, ?)` + aggregatePieceCidBytes := aggregatePieceCid.Bytes() + var batch *gocql.Batch + batchSize := i.settings.InsertBatchSize + + if len(records) == 0 { + return xerrors.Errorf("no records to insert") + } + + for _, r := range records { + if batch == nil { + batch = i.session.NewBatch(gocql.UnloggedBatch).WithContext(ctx) + } + + batch.Entries = append(batch.Entries, gocql.BatchEntry{ + Stmt: insertAggregateIndex, + Args: []interface{}{r.Cid.Bytes(), aggregatePieceCidBytes, r.Offset, r.Size}, + Idempotent: true, + }) + + if len(batch.Entries) >= batchSize { + if err := i.session.ExecuteBatch(batch); err != nil { + return xerrors.Errorf("executing batch insert for aggregate piece %s: %w", aggregatePieceCid, err) + } + batch = nil + } + } + + if batch != nil { + if len(batch.Entries) >= 0 { + if err := i.session.ExecuteBatch(batch); err != nil { + return xerrors.Errorf("executing batch insert for aggregate piece %s: %w", aggregatePieceCid, err) + } + } + } + + return nil +} + +func (i *IndexStore) FindPieceInAggregate(ctx context.Context, pieceCid cid.Cid) ([]Record, error) { + var recs []Record + qry := `SELECT AggregatePieceCid, UnpaddedOffset, UnpaddedLength FROM PieceToAggregatePiece WHERE PieceCid = ?` + iter := i.session.Query(qry, pieceCid.Bytes()).WithContext(ctx).Iter() + var r []byte + var idx, length int64 + for iter.Scan(&r, &idx, &length) { + c, err := cid.Cast(r) + if err != nil { + return nil, xerrors.Errorf("casting aggregate piece cid: %w", err) + } + recs = append(recs, Record{ + Cid: c, + Offset: uint64(idx), + Size: uint64(length), + }) + + r = make([]byte, 0) + } + if err := iter.Close(); err != nil { + return nil, xerrors.Errorf("iterating aggregate piece cid (P:0x%02x): %w", pieceCid.Bytes(), err) + } + return recs, nil +} + +func (i *IndexStore) RemoveAggregateIndex(ctx context.Context, aggregatePieceCid cid.Cid) error { + aggregatePieceCidBytes := aggregatePieceCid.Bytes() + + err := i.session.Query(`DELETE FROM PieceToAggregatePiece WHERE AggregatePieceCid = ?`, aggregatePieceCidBytes).WithContext(ctx).Exec() + if err != nil { + return xerrors.Errorf("deleting aggregate piece cid (P:0x%02x): %w", aggregatePieceCid.Bytes(), err) + } + + return nil +} + +func (i *IndexStore) UpdatePieceCidV1ToV2(ctx context.Context, pieceCidV1 cid.Cid, pieceCidV2 cid.Cid) error { + p1 := pieceCidV1.Bytes() + p2 := pieceCidV2.Bytes() + + // First, select all PayloadMultihash for the given PieceCid from PieceBlockOffsetSize + selectQry := `SELECT PayloadMultihash FROM PieceBlockOffsetSize WHERE PieceCid = ?` + iter := i.session.Query(selectQry, p1).WithContext(ctx).Iter() + + var payloadMultihashBytes []byte + var payloadMultihashes [][]byte + for iter.Scan(&payloadMultihashBytes) { + // Copy the bytes since the slice will be overwritten + mhCopy := make([]byte, len(payloadMultihashBytes)) + copy(mhCopy, payloadMultihashBytes) + payloadMultihashes = append(payloadMultihashes, mhCopy) + } + if err := iter.Close(); err != nil { + return xerrors.Errorf("scanning PayloadMultihash for piece %s: %w", pieceCidV1.String(), err) + } + + // Prepare batch replace for PayloadToPieces + updatePiecesQry := `UPDATE PayloadToPieces SET PieceCid = ? WHERE PayloadMultihash = ? AND PieceCid = ?` + batch := i.session.NewBatch(gocql.UnloggedBatch).WithContext(ctx) + batchSize := i.settings.InsertBatchSize + + for idx, payloadMH := range payloadMultihashes { + batch.Entries = append(batch.Entries, gocql.BatchEntry{ + Stmt: updatePiecesQry, + Args: []interface{}{p2, payloadMH, p1}, + Idempotent: true, + }) + + if len(batch.Entries) >= batchSize || idx == len(payloadMultihashes)-1 { + if err := i.executeBatchWithRetry(ctx, batch, pieceCidV1); err != nil { + return xerrors.Errorf("executing batch replace for PayloadToPieces for piece %s: %w", pieceCidV1, err) + } + batch = i.session.NewBatch(gocql.UnloggedBatch).WithContext(ctx) + } + } + + if len(batch.Entries) >= 0 { + if err := i.executeBatchWithRetry(ctx, batch, pieceCidV1); err != nil { + return xerrors.Errorf("executing batch replace for PayloadToPieces for piece %s: %w", pieceCidV1, err) + } + } + + // Prepare batch replace for PieceBlockOffsetSize + updatePiecesQry = `UPDATE PieceBlockOffsetSize SET PieceCid = ? WHERE PayloadMultihash = ? AND PieceCid = ?` + batch = i.session.NewBatch(gocql.UnloggedBatch).WithContext(ctx) + batchSize = i.settings.InsertBatchSize + + for idx, payloadMH := range payloadMultihashes { + batch.Entries = append(batch.Entries, gocql.BatchEntry{ + Stmt: updatePiecesQry, + Args: []interface{}{p2, payloadMH, p1}, + Idempotent: true, + }) + + if len(batch.Entries) >= batchSize || idx == len(payloadMultihashes)-1 { + if err := i.executeBatchWithRetry(ctx, batch, pieceCidV1); err != nil { + return xerrors.Errorf("executing batch replace for PieceBlockOffsetSize for piece %s: %w", pieceCidV1, err) + } + batch = i.session.NewBatch(gocql.UnloggedBatch).WithContext(ctx) + } + } + + if len(batch.Entries) >= 0 { + if err := i.executeBatchWithRetry(ctx, batch, pieceCidV1); err != nil { + return xerrors.Errorf("executing batch replace for PieceBlockOffsetSize for piece %s: %w", pieceCidV1, err) + } + } + + return nil +} + +type NodeDigest struct { + Layer int // Layer index in the merkle Tree + Index int64 // logical index at that layer + Hash [32]byte // 32 bytes +} + +func (i *IndexStore) AddPDPLayer(ctx context.Context, pieceCidV2 cid.Cid, layer []NodeDigest) error { + qry := `INSERT INTO PDPCacheLayer (PieceCid, LayerIndex, Leaf, LeafIndex) VALUES (?, ?, ?, ?)` + pieceCidBytes := pieceCidV2.Bytes() + var batch *gocql.Batch + batchSize := i.settings.InsertBatchSize + + if len(layer) == 0 { + return xerrors.Errorf("no records to insert") + } + + for _, r := range layer { + if batch == nil { + batch = i.session.NewBatch(gocql.UnloggedBatch).WithContext(ctx) + } + + batch.Entries = append(batch.Entries, gocql.BatchEntry{ + Stmt: qry, + Args: []interface{}{pieceCidBytes, r.Layer, r.Hash, r.Index}, + Idempotent: true, + }) + + if len(batch.Entries) >= batchSize { + if err := i.session.ExecuteBatch(batch); err != nil { + return xerrors.Errorf("executing batch insert for PDP cache layer for piece %s: %w", pieceCidV2.String(), err) + } + batch = nil + } + } + + if batch != nil { + if len(batch.Entries) >= 0 { + if err := i.session.ExecuteBatch(batch); err != nil { + return xerrors.Errorf("executing batch insert for PDP cache layer for piece %s: %w", pieceCidV2.String(), err) + } + } + } + + return nil +} + +func (i *IndexStore) GetPDPLayer(ctx context.Context, pieceCidV2 cid.Cid) ([]NodeDigest, error) { + var layer []NodeDigest + qry := `SELECT LayerIndex, Leaf, LeafIndex FROM PDPCacheLayer WHERE PieceCid = ? ORDER BY LeafIndex ASC` + iter := i.session.Query(qry, pieceCidV2.Bytes()).WithContext(ctx).Iter() + r := make([]byte, 32) + var idx int64 + var layerIdx int + for iter.Scan(&layerIdx, &r, &idx) { + layer = append(layer, NodeDigest{ + Layer: layerIdx, + Index: idx, + Hash: [32]byte(r), + }) + r = make([]byte, 32) + } + if err := iter.Close(); err != nil { + return nil, xerrors.Errorf("iterating PDP cache layer (P:0x%02x): %w", pieceCidV2.Bytes(), err) + } + sort.Slice(layer, func(i, j int) bool { + return layer[i].Index < layer[j].Index + }) + return layer, nil +} + +func (i *IndexStore) DeletePDPLayer(ctx context.Context, pieceCidV2 cid.Cid) error { + qry := `DELETE FROM PDPCacheLayer WHERE PieceCid = ?` + if err := i.session.Query(qry, pieceCidV2.Bytes()).WithContext(ctx).Exec(); err != nil { + return xerrors.Errorf("deleting PDP cache layer (P:0x%02x): %w", pieceCidV2.Bytes(), err) + } + return nil +} + +func (i *IndexStore) HasPDPLayer(ctx context.Context, pieceCidV2 cid.Cid) (bool, error) { + qry := `SELECT Leaf FROM PDPCacheLayer WHERE PieceCid = ? LIMIT 1` + iter := i.session.Query(qry, pieceCidV2.Bytes()).WithContext(ctx).Iter() + + var hashes [][]byte + var r []byte + for iter.Scan(&r) { + if r != nil { + hashes = append(hashes, r) + r = make([]byte, 32) + } + } + if err := iter.Close(); err != nil { + return false, xerrors.Errorf("iterating PDP cache layer (P:0x%02x): %w", pieceCidV2.Bytes(), err) } return len(hashes) > 0, nil + +} + +func (i *IndexStore) GetPDPNode(ctx context.Context, pieceCidV2 cid.Cid, index int64) (bool, *NodeDigest, error) { + qry := `SELECT IndexLayer, Leaf, LeafIndex FROM PDPCacheLayer WHERE PieceCid = ? AND LeafIndex = ? LIMIT 1` + iter := i.session.Query(qry, pieceCidV2.Bytes(), index).WithContext(ctx).Iter() + + var node *NodeDigest + + var r []byte + var idx int + var lidx int64 + for iter.Scan(&r, &idx, &lidx) { + if r != nil { + node = &NodeDigest{ + Layer: idx, + Index: lidx, + Hash: [32]byte(r), + } + r = make([]byte, 32) + } + } + if err := iter.Close(); err != nil { + return false, nil, xerrors.Errorf("iterating PDP cache layer (P:0x%02x): %w", pieceCidV2.Bytes(), err) + } + if node != nil { + return true, node, nil + } + return false, nil, nil } diff --git a/market/indexstore/indexstore_test.go b/market/indexstore/indexstore_test.go index 24a6c0693..ee2881611 100644 --- a/market/indexstore/indexstore_test.go +++ b/market/indexstore/indexstore_test.go @@ -5,7 +5,6 @@ import ( "io" "os" "testing" - "time" carv2 "github.com/ipld/go-car/v2" "github.com/ipld/go-car/v2/blockstore" @@ -32,7 +31,8 @@ func TestNewIndexStore(t *testing.T) { ctx := context.Background() cfg := config.DefaultCurioConfig() - idxStore, err := NewIndexStore([]string{envElse("CURIO_HARMONYDB_HOSTS", "127.0.0.1")}, 9042, cfg) + idxStore := NewIndexStore([]string{envElse("CURIO_HARMONYDB_HOSTS", "127.0.0.1")}, 9042, cfg) + err := idxStore.Start(ctx, true) require.NoError(t, err) // Create a car file and calculate commP @@ -42,7 +42,7 @@ func TestNewIndexStore(t *testing.T) { _ = os.RemoveAll(dir) }() - rf, err := testutils.CreateRandomFile(dir, time.Now().Unix(), 8000000) + rf, err := testutils.CreateRandomTmpFile(dir, 8000000) require.NoError(t, err) caropts := []carv2.Option{ @@ -111,10 +111,30 @@ func TestNewIndexStore(t *testing.T) { pcids, err := idxStore.PiecesContainingMultihash(ctx, m) require.NoError(t, err) require.Len(t, pcids, 1) - require.Equal(t, pcids[0].PieceCid.String(), commp.PieceCID.String()) + require.Equal(t, pcids[0].PieceCidV2.String(), commp.PieceCID.String()) // Remove all indexes from the store - err = idxStore.RemoveIndexes(ctx, pcids[0].PieceCid) + err = idxStore.RemoveIndexes(ctx, pcids[0].PieceCidV2) + require.NoError(t, err) + + err = idxStore.session.Query("SELECT * FROM PieceToAggregatePiece").Exec() + require.NoError(t, err) + + aggrRec := Record{ + Cid: commp.PieceCID, + Offset: 0, + Size: 100, + } + + err = idxStore.InsertAggregateIndex(ctx, commp.PieceCID, []Record{aggrRec}) + require.NoError(t, err) + + x, err := idxStore.FindPieceInAggregate(ctx, commp.PieceCID) + require.NoError(t, err) + require.Len(t, x, 1) + require.Equal(t, x[0].Cid, commp.PieceCID) + + err = idxStore.RemoveAggregateIndex(ctx, commp.PieceCID) require.NoError(t, err) // Drop the tables @@ -122,4 +142,6 @@ func TestNewIndexStore(t *testing.T) { require.NoError(t, err) err = idxStore.session.Query("DROP TABLE PieceBlockOffsetSize").Exec() require.NoError(t, err) + err = idxStore.session.Query("DROP TABLE piecetoaggregatepiece").Exec() + require.NoError(t, err) } diff --git a/market/ipni/chunker/serve-chunker.go b/market/ipni/chunker/serve-chunker.go index fa301a411..4e13a1969 100644 --- a/market/ipni/chunker/serve-chunker.go +++ b/market/ipni/chunker/serve-chunker.go @@ -21,6 +21,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/lib/cachedreader" + "github.com/filecoin-project/curio/lib/commcidv2" "github.com/filecoin-project/curio/lib/pieceprovider" "github.com/filecoin-project/curio/lib/promise" "github.com/filecoin-project/curio/lib/storiface" @@ -49,7 +50,7 @@ type ServeChunker struct { entryCache *lru.Cache[cid.Cid, *promise.Promise[result.Result[ipniEntry]]] - // small cache keeping track of which piece CIDs shouldn't be skipped. Entries expire after NoSkipCacheTTL + // small cache keeping track of which piece CIDs (v2) shouldn't be skipped. Entries expire after NoSkipCacheTTL noSkipCache *lru.Cache[cid.Cid, time.Time] } @@ -105,11 +106,10 @@ func (p *ServeChunker) getEntry(rctx context.Context, block cid.Cid, speculated if b, ok := p.entryCache.Get(block); ok { v := b.Val(rctx) - switch v.Error { - case nil: + if v.Error == nil { prevChunk = v.Value.Prev return v.Value.Data, nil - case ErrNotFound: + } else if errors.Is(v.Error, ErrNotFound) { log.Errorw("Cached promise skip", "block", block, "prev", prevChunk, "err", err) return v.Value.Data, v.Error } @@ -130,8 +130,8 @@ func (p *ServeChunker) getEntry(rctx context.Context, block cid.Cid, speculated ctx := context.Background() type ipniChunk struct { - PieceCID string `db:"piece_cid"` - FromCar bool `db:"from_car"` + PieceCIDv2 string `db:"piece_cid"` + FromCar bool `db:"from_car"` FirstCID *string `db:"first_cid"` StartOffset *int64 `db:"start_offset"` @@ -169,12 +169,12 @@ func (p *ServeChunker) getEntry(rctx context.Context, block cid.Cid, speculated } chunk := ipniChunks[0] - pieceCid, err := cid.Parse(chunk.PieceCID) + pieceCidv2, err := cid.Parse(chunk.PieceCIDv2) if err != nil { return nil, xerrors.Errorf("parsing piece CID: %w", err) } - if leave, ok := p.noSkipCache.Get(pieceCid); !ok || time.Now().After(leave) { + if leave, ok := p.noSkipCache.Get(pieceCidv2); !ok || time.Now().After(leave) { skip, err := p.checkIsEntrySkip(ctx, block) if err != nil { return nil, xerrors.Errorf("checking entry skipped for block %s: %w", block, err) @@ -185,7 +185,7 @@ func (p *ServeChunker) getEntry(rctx context.Context, block cid.Cid, speculated } } - p.noSkipCache.Add(pieceCid, time.Now().Add(NoSkipCacheTTL)) + p.noSkipCache.Add(pieceCidv2, time.Now().Add(NoSkipCacheTTL)) var next ipld.Link if chunk.PrevCID != nil { @@ -209,23 +209,30 @@ func (p *ServeChunker) getEntry(rctx context.Context, block cid.Cid, speculated firstHash := multihash.Multihash(cb) - return p.reconstructChunkFromDB(ctx, block, pieceCid, firstHash, next, chunk.NumBlocks, speculated) + return p.reconstructChunkFromDB(ctx, block, pieceCidv2, firstHash, next, chunk.NumBlocks, speculated) } - return p.reconstructChunkFromCar(ctx, block, pieceCid, *chunk.StartOffset, next, chunk.NumBlocks, speculated) + return p.reconstructChunkFromCar(ctx, block, pieceCidv2, *chunk.StartOffset, next, chunk.NumBlocks, speculated) } // reconstructChunkFromCar reconstructs a chunk from a car file. -func (p *ServeChunker) reconstructChunkFromCar(ctx context.Context, chunk, piece cid.Cid, startOff int64, next ipld.Link, numBlocks int64, speculate bool) ([]byte, error) { +func (p *ServeChunker) reconstructChunkFromCar(ctx context.Context, chunk, piecev2 cid.Cid, startOff int64, next ipld.Link, numBlocks int64, speculate bool) ([]byte, error) { start := time.Now() - reader, _, err := p.cpr.GetSharedPieceReader(ctx, piece) + commp, err := commcidv2.CommPFromPCidV2(piecev2) + if err != nil { + return nil, xerrors.Errorf("getting piece commitment from piece CID v2: %w", err) + } + + pi := commp.PieceInfo() + + reader, _, err := p.cpr.GetSharedPieceReader(ctx, piecev2) defer func(reader storiface.Reader) { _ = reader.Close() }(reader) if err != nil { - return nil, xerrors.Errorf("failed to read piece %s for ipni chunk %s reconstruction: %w", piece, chunk, err) + return nil, xerrors.Errorf("failed to read piece %s of size %d for ipni chunk %s reconstruction: %w", pi.PieceCID, pi.Size, chunk, err) } _, err = reader.Seek(startOff, io.SeekStart) @@ -275,18 +282,32 @@ func (p *ServeChunker) reconstructChunkFromCar(ctx context.Context, chunk, piece return nil, xerrors.Errorf("encoding chunk node: %w", err) } - log.Infow("Reconstructing chunk from car", "chunk", chunk, "piece", piece, "startOffset", startOff, "numBlocks", numBlocks, "speculated", speculate, "readMiB", float64(curOff-startOff)/1024/1024, "recomputeTime", time.Since(read), "totalTime", time.Since(start), "ents/s", float64(numBlocks)/time.Since(start).Seconds(), "MiB/s", float64(curOff-startOff)/1024/1024/time.Since(start).Seconds()) + log.Infow("Reconstructing chunk from car", "chunk", chunk, "piece", pi.PieceCID, "size", pi.Size, "startOffset", startOff, "numBlocks", numBlocks, "speculated", speculate, "readMiB", float64(curOff-startOff)/1024/1024, "recomputeTime", time.Since(read), "totalTime", time.Since(start), "ents/s", float64(numBlocks)/time.Since(start).Seconds(), "MiB/s", float64(curOff-startOff)/1024/1024/time.Since(start).Seconds()) return b.Bytes(), nil } // ReconstructChunkFromDB reconstructs a chunk from the database. -func (p *ServeChunker) reconstructChunkFromDB(ctx context.Context, chunk, piece cid.Cid, firstHash multihash.Multihash, next ipld.Link, numBlocks int64, speculate bool) ([]byte, error) { +func (p *ServeChunker) reconstructChunkFromDB(ctx context.Context, chunk, piecev2 cid.Cid, firstHash multihash.Multihash, next ipld.Link, numBlocks int64, speculate bool) ([]byte, error) { start := time.Now() - mhs, err := p.indexStore.GetPieceHashRange(ctx, piece, firstHash, numBlocks) + commp, err := commcidv2.CommPFromPCidV2(piecev2) if err != nil { - return nil, xerrors.Errorf("getting piece hash range: %w", err) + return nil, xerrors.Errorf("getting piece commitment from piece CID v2: %w", err) + } + + pi := commp.PieceInfo() + + var mhs []multihash.Multihash + + // Handle exception for PDP piece announcement with FilecoinPieceHttp{} metadata + if numBlocks == 1 { + mhs = []multihash.Multihash{firstHash} + } else { + mhs, err = p.indexStore.GetPieceHashRange(ctx, piecev2, firstHash, numBlocks) + if err != nil { + return nil, xerrors.Errorf("getting piece hash range: %w", err) + } } // Create the chunk node @@ -316,7 +337,7 @@ func (p *ServeChunker) reconstructChunkFromDB(ctx context.Context, chunk, piece return nil, err } - log.Infow("Reconstructing chunk from DB", "chunk", chunk, "piece", piece, "firstHash", firstHash, "numBlocks", numBlocks, "speculated", speculate, "totalTime", time.Since(start), "ents/s", float64(numBlocks)/time.Since(start).Seconds()) + log.Infow("Reconstructing chunk from DB", "chunk", chunk, "piece", pi.PieceCID, "size", pi.Size, "firstHash", firstHash, "numBlocks", numBlocks, "speculated", speculate, "totalTime", time.Since(start), "ents/s", float64(numBlocks)/time.Since(start).Seconds()) return b.Bytes(), nil } diff --git a/market/ipni/ipni-provider/ipni-provider.go b/market/ipni/ipni-provider/ipni-provider.go index 33478e266..b16497843 100644 --- a/market/ipni/ipni-provider/ipni-provider.go +++ b/market/ipni/ipni-provider/ipni-provider.go @@ -22,7 +22,6 @@ import ( "github.com/ipni/go-libipni/dagsync/ipnisync/head" "github.com/ipni/go-libipni/ingest/schema" "github.com/ipni/go-libipni/maurl" - "github.com/ipni/go-libipni/metadata" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" @@ -103,8 +102,9 @@ func NewProvider(d *deps.Deps) (*Provider, error) { for rows.Next() && rows.Err() == nil { var priv []byte var peerID string + var sp int64 var spID abi.ActorID - err := rows.Scan(&priv, &peerID, &spID) + err := rows.Scan(&priv, &peerID, &sp) if err != nil { return nil, xerrors.Errorf("failed to scan the row: %w", err) } @@ -123,6 +123,10 @@ func NewProvider(d *deps.Deps) (*Provider, error) { return nil, xerrors.Errorf("peer ID mismatch: got %s (calculated), expected %s (DB)", id.String(), peerID) } + if sp < 0 { + spID = abi.ActorID(0) + } + maddr, err := address.NewIDAddress(uint64(spID)) if err != nil { return nil, xerrors.Errorf("parsing miner ID: %w", err) @@ -207,6 +211,7 @@ func (p *Provider) getAd(ctx context.Context, ad cid.Cid, provider string) (sche Addresses string Signature []byte Entries string + Metadata []byte } err := p.db.Select(ctx, &ads, `SELECT @@ -216,7 +221,8 @@ func (p *Provider) getAd(ctx context.Context, ad cid.Cid, provider string) (sche provider, addresses, signature, - entries + entries, + metadata FROM ipni WHERE ad_cid = $1 AND provider = $2`, ad.String(), provider) @@ -240,19 +246,13 @@ func (p *Provider) getAd(ctx context.Context, ad cid.Cid, provider string) (sche return schema.Advertisement{}, xerrors.Errorf("parsing entry CID: %w", err) } - mds := metadata.IpfsGatewayHttp{} - md, err := mds.MarshalBinary() - if err != nil { - return schema.Advertisement{}, xerrors.Errorf("marshalling metadata: %w", err) - } - adv := schema.Advertisement{ Provider: a.Provider, Signature: a.Signature, Entries: cidlink.Link{Cid: e}, ContextID: a.ContextID, IsRm: a.IsRm, - Metadata: md, + Metadata: a.Metadata, } if a.Addresses != "" { diff --git a/market/ipni/ipni-provider/spark.go b/market/ipni/ipni-provider/spark.go index eb661a292..e3b46d44a 100644 --- a/market/ipni/ipni-provider/spark.go +++ b/market/ipni/ipni-provider/spark.go @@ -27,6 +27,9 @@ import ( func (p *Provider) updateSparkContract(ctx context.Context) error { for _, pInfo := range p.keys { pInfo := pInfo + if pInfo.SPID == 0 { + return nil + } mInfo, err := p.full.StateMinerInfo(ctx, pInfo.Miner, types.EmptyTSK) if err != nil { return err diff --git a/market/ipni/types/types.go b/market/ipni/types/types.go new file mode 100644 index 000000000..7248571e2 --- /dev/null +++ b/market/ipni/types/types.go @@ -0,0 +1,50 @@ +package types + +import ( + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +// PdpIpniContext is used to generate the context bytes for PDP IPNI ads +type PdpIpniContext struct { + // PieceCID is piece CID V2 + PieceCID cid.Cid + + // Payload determines if the IPNI ad is TransportFilecoinPieceHttp or TransportIpfsGatewayHttp + Payload bool +} + +// Marshal encodes the PdpIpniContext into a byte slice containing a single byte for Payload and the byte representation of PieceCID. +func (p *PdpIpniContext) Marshal() ([]byte, error) { + pBytes := p.PieceCID.Bytes() + if len(pBytes) > 63 { + return nil, xerrors.Errorf("piece CID byte length exceeds 63") + } + payloadByte := make([]byte, 1) + if p.Payload { + payloadByte[0] = 1 + } else { + payloadByte[0] = 0 + } + return append(payloadByte, pBytes...), nil +} + +// Unmarshal decodes the provided byte slice into the PdpIpniContext struct, validating its length and extracting the PieceCID and Payload values. +func (p *PdpIpniContext) Unmarshal(b []byte) error { + if len(b) > 64 { + return xerrors.Errorf("byte length exceeds 64") + } + if len(b) < 2 { + return xerrors.Errorf("byte length is less than 2") + } + payload := b[0] == 1 + pcid, err := cid.Cast(b[1:]) + if err != nil { + return err + } + + p.PieceCID = pcid + p.Payload = payload + + return nil +} diff --git a/market/mk12/http/http.go b/market/mk12/http/http.go index eff6a15a7..f9aa00f75 100644 --- a/market/mk12/http/http.go +++ b/market/mk12/http/http.go @@ -22,7 +22,9 @@ import ( storage_market "github.com/filecoin-project/curio/tasks/storage-market" ) -var log = logging.Logger("mktdealhdlr") +const requestTimeout = 10 * time.Second + +var log = logging.Logger("mk12httphdlr") // Redirector struct with a database connection type MK12DealHandler struct { @@ -49,9 +51,9 @@ func NewMK12DealHandler(db *harmonydb.DB, cfg *config.CurioConfig, dm *storage_m func Router(mdh *MK12DealHandler) http.Handler { mux := chi.NewRouter() mux.Use(dealRateLimitMiddleware()) - mux.Post("/store", mdh.mk12deal) - mux.Get("/ask", mdh.mk12ask) - mux.Get("/status", mdh.mk12status) + mux.Method("POST", "/store", http.TimeoutHandler(http.HandlerFunc(mdh.mk12deal), requestTimeout, "timeout reading request")) + mux.Method("GET", "/status", http.TimeoutHandler(http.HandlerFunc(mdh.mk12status), requestTimeout, "timeout reading request")) + mux.Method("GET", "/ask", http.TimeoutHandler(http.HandlerFunc(mdh.mk12ask), requestTimeout, "timeout reading request")) return mux } diff --git a/market/mk12/mk12.go b/market/mk12/mk12.go index 9cb872200..e3b01a8ef 100644 --- a/market/mk12/mk12.go +++ b/market/mk12/mk12.go @@ -539,12 +539,12 @@ func (m *MK12) processDeal(ctx context.Context, deal *ProviderDealState) (*Provi // Store the deal n, err := tx.Exec(`INSERT INTO market_mk12_deals (uuid, signed_proposal_cid, - proposal_signature, proposal, proposal_cid, piece_cid, + proposal_signature, proposal, proposal_cid, piece_cid, raw_size, piece_size, offline, verified, sp_id, start_epoch, end_epoch, client_peer_id, fast_retrieval, announce_to_ipni, url, url_headers, label) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19) ON CONFLICT (uuid) DO NOTHING`, - deal.DealUuid.String(), deal.SignedProposalCID.String(), sigByte, propJson, propCid, prop.PieceCID.String(), + deal.DealUuid.String(), deal.SignedProposalCID.String(), sigByte, propJson, propCid, prop.PieceCID.String(), deal.Transfer.Size, prop.PieceSize, deal.IsOffline, prop.VerifiedDeal, mid, prop.StartEpoch, prop.EndEpoch, deal.ClientPeerID.String(), deal.FastRetrieval, deal.AnnounceToIPNI, tInfo.URL, headers, b.Bytes()) @@ -560,7 +560,7 @@ func (m *MK12) processDeal(ctx context.Context, deal *ProviderDealState) (*Provi if !deal.IsOffline { var pieceID int64 // Attempt to select the piece ID first - err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1`, prop.PieceCID.String()).Scan(&pieceID) + err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1 AND piece_padded_size = $2`, prop.PieceCID.String(), prop.PieceSize).Scan(&pieceID) if err != nil { if errors.Is(err, pgx.ErrNoRows) { diff --git a/market/mk20/client/auth.go b/market/mk20/client/auth.go new file mode 100644 index 000000000..fdc57984a --- /dev/null +++ b/market/mk20/client/auth.go @@ -0,0 +1,42 @@ +package client + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "fmt" + "time" +) + +// Signer abstracts the signature operation (ed25519, secp256k1, …). +type Signer interface { + // Sign signs the supplied digest and returns raw signature bytes. + Sign(digest []byte) ([]byte, error) + // PublicKeyBytes returns the raw public‑key bytes (no multibase / address). + PublicKeyBytes() []byte + // Type returns a short string identifying the key algorithm ("ed25519", …). + Type() string +} + +// HourlyCurioAuthHeader returns a HTTPClient Option that injects “CurioAuth …” +// on every request using the algorithm defined in the OpenAPI spec. +func HourlyCurioAuthHeader(s Signer) Option { + return WithAuth(func(_ context.Context) (string, string, error) { + now := time.Now().UTC().Truncate(time.Hour) + msg := bytes.Join([][]byte{s.PublicKeyBytes(), []byte(now.Format(time.RFC3339))}, []byte{}) + digest := sha256.Sum256(msg) + + sig, err := s.Sign(digest[:]) + if err != nil { + return "", "", err + } + + header := fmt.Sprintf("CurioAuth %s:%s:%s", + s.Type(), + base64.StdEncoding.EncodeToString(s.PublicKeyBytes()), + base64.StdEncoding.EncodeToString(sig), + ) + return "Authorization", header, nil + }) +} diff --git a/market/mk20/client/client.go b/market/mk20/client/client.go new file mode 100644 index 000000000..414774362 --- /dev/null +++ b/market/mk20/client/client.go @@ -0,0 +1,535 @@ +package client + +import ( + "bytes" + "context" + "crypto/rand" + "io" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/oklog/ulid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/curio/market/mk20" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/wallet" +) + +var log = logging.Logger("mk20-client") + +type Client struct { + http *HTTPClient +} + +func NewClient(baseURL string, client address.Address, wallet *wallet.LocalWallet) *Client { + s := NewAuth(client, wallet) + hclient := NewHTTPClient(baseURL, HourlyCurioAuthHeader(s)) + return &Client{ + http: hclient, + } +} + +func (c *Client) CreateDataSet(ctx context.Context, client, recordKeeper string, extraData []byte) (ulid.ULID, error) { + id, err := ulid.New(ulid.Now(), rand.Reader) + if err != nil { + return ulid.ULID{}, xerrors.Errorf("failed to create ULID: %w", err) + } + + deal := &mk20.Deal{ + Identifier: id, + Client: client, + Products: mk20.Products{ + PDPV1: &mk20.PDPV1{ + CreateDataSet: true, + RecordKeeper: recordKeeper, + ExtraData: extraData, + }, + }, + } + + rerr := c.http.Store(ctx, deal) + if rerr.Error != nil { + return ulid.ULID{}, rerr.Error + } + if rerr.Status != 200 { + return ulid.ULID{}, rerr.HError() + } + return id, nil +} + +func (c *Client) RemoveDataSet(ctx context.Context, client, recordKeeper string, extraData []byte, dataSetID *uint64) (ulid.ULID, error) { + if dataSetID == nil { + return ulid.ULID{}, nil + } + + id, err := ulid.New(ulid.Now(), rand.Reader) + if err != nil { + return ulid.ULID{}, xerrors.Errorf("failed to create ULID: %w", err) + } + + deal := &mk20.Deal{ + Identifier: id, + Client: client, + Products: mk20.Products{ + PDPV1: &mk20.PDPV1{ + DeleteDataSet: true, + DataSetID: dataSetID, + RecordKeeper: recordKeeper, + ExtraData: extraData, + }, + }, + } + + rerr := c.http.Store(ctx, deal) + if rerr.Error != nil { + return ulid.ULID{}, rerr.Error + } + if rerr.Status != 200 { + return ulid.ULID{}, rerr.HError() + } + return id, nil +} + +func (c *Client) addPiece(ctx context.Context, client, recordKeeper string, extraData []byte, dataSetID *uint64, dataSource *mk20.DataSource, ret *mk20.RetrievalV1) (ulid.ULID, error) { + if dataSetID == nil { + return ulid.ULID{}, nil + } + + id, err := ulid.New(ulid.Now(), rand.Reader) + if err != nil { + return ulid.ULID{}, xerrors.Errorf("failed to create ULID: %w", err) + } + + deal := &mk20.Deal{ + Identifier: id, + Client: client, + Data: dataSource, + Products: mk20.Products{ + PDPV1: &mk20.PDPV1{ + AddPiece: true, + DataSetID: dataSetID, + RecordKeeper: recordKeeper, + ExtraData: extraData, + }, + RetrievalV1: ret, + }, + } + + rerr := c.http.Store(ctx, deal) + if rerr.Error != nil { + return ulid.ULID{}, rerr.Error + } + if rerr.Status != 200 { + return ulid.ULID{}, rerr.HError() + } + return id, nil +} + +func (c *Client) RemovePiece(ctx context.Context, client, recordKeeper string, extraData []byte, dataSetID *uint64, pieceIDs []uint64) (ulid.ULID, error) { + if dataSetID == nil { + return ulid.ULID{}, xerrors.Errorf("dataSetID is required") + } + + if len(pieceIDs) == 0 { + return ulid.ULID{}, xerrors.Errorf("at least one pieceID is required") + } + + id, err := ulid.New(ulid.Now(), rand.Reader) + if err != nil { + return ulid.ULID{}, xerrors.Errorf("failed to create ULID: %w", err) + } + + deal := &mk20.Deal{ + Identifier: id, + Client: client, + Products: mk20.Products{ + PDPV1: &mk20.PDPV1{ + DeletePiece: true, + DataSetID: dataSetID, + RecordKeeper: recordKeeper, + ExtraData: extraData, + PieceIDs: pieceIDs, + }, + }, + } + + rerr := c.http.Store(ctx, deal) + if rerr.Error != nil { + return ulid.ULID{}, rerr.Error + } + if rerr.Status != 200 { + return ulid.ULID{}, rerr.HError() + } + return id, nil +} + +func (c *Client) CreateDataSource(pieceCID cid.Cid, car, raw, aggregate, index, withCDN bool, aggregateType mk20.AggregateType, sub []mk20.DataSource) (*mk20.Deal, error) { + if car && raw && aggregate || car && raw || car && aggregate || raw && aggregate { + return nil, xerrors.Errorf("only one data format is supported") + } + + if !car && (index || withCDN) { + return nil, xerrors.Errorf("only car data format supports IPFS style CDN retrievals") + } + + err := mk20.ValidatePieceCID(pieceCID) + if err != nil { + return nil, err + } + + dataSource := &mk20.DataSource{ + PieceCID: pieceCID, + } + + if car { + dataSource.Format.Car = &mk20.FormatCar{} + } + + if raw { + dataSource.Format.Raw = &mk20.FormatBytes{} + } + + if aggregate { + if len(sub) <= 1 { + return nil, xerrors.Errorf("must provide at least two sub data source") + } + + if aggregateType == mk20.AggregateTypeNone { + return nil, xerrors.Errorf("must provide valid aggregateType") + } + + dataSource.Format.Aggregate = &mk20.FormatAggregate{ + Type: aggregateType, + Sub: sub, + } + } + + ret := &mk20.Deal{ + Data: dataSource, + Products: mk20.Products{ + RetrievalV1: &mk20.RetrievalV1{ + Indexing: index, + AnnouncePiece: true, + AnnouncePayload: withCDN, + }, + }, + } + + return ret, nil +} + +func (c *Client) AddPieceWithHTTP(ctx context.Context, client, recordKeeper string, extraData []byte, dataSetID *uint64, pieceCID cid.Cid, car, raw, index, withCDN bool, aggregateType mk20.AggregateType, sub []mk20.DataSource, urls []mk20.HttpUrl) (ulid.ULID, error) { + var aggregate bool + + if aggregateType == mk20.AggregateTypeV1 { + aggregate = true + } + + d, err := c.CreateDataSource(pieceCID, car, raw, aggregate, index, withCDN, aggregateType, sub) + if err != nil { + return ulid.ULID{}, xerrors.Errorf("failed to create data source: %w", err) + } + + d.Data.SourceHTTP = &mk20.DataSourceHTTP{ + URLs: urls, + } + + return c.addPiece(ctx, client, recordKeeper, extraData, dataSetID, d.Data, d.Products.RetrievalV1) +} + +func (c *Client) AddPieceWithAggregate(ctx context.Context, client, recordKeeper string, extraData []byte, dataSetID *uint64, pieceCID cid.Cid, index, withCDN bool, aggregateType mk20.AggregateType, sub []mk20.DataSource) (ulid.ULID, error) { + d, err := c.CreateDataSource(pieceCID, false, false, true, index, withCDN, aggregateType, sub) + if err != nil { + return ulid.ULID{}, xerrors.Errorf("failed to create data source: %w", err) + } + + d.Data.SourceAggregate = &mk20.DataSourceAggregate{ + Pieces: sub, + } + + d.Data.Format.Aggregate.Sub = nil + + return c.addPiece(ctx, client, recordKeeper, extraData, dataSetID, d.Data, d.Products.RetrievalV1) +} + +func (c *Client) AddPieceWithPut(ctx context.Context, client, recordKeeper string, extraData []byte, dataSetID *uint64, pieceCID cid.Cid, car, raw, index, withCDN bool, aggregateType mk20.AggregateType, sub []mk20.DataSource) (ulid.ULID, error) { + var aggregate bool + + if aggregateType == mk20.AggregateTypeV1 { + aggregate = true + } + + d, err := c.CreateDataSource(pieceCID, car, raw, aggregate, index, withCDN, aggregateType, sub) + if err != nil { + return ulid.ULID{}, xerrors.Errorf("failed to create data source: %w", err) + } + + d.Data.SourceHttpPut = &mk20.DataSourceHttpPut{} + + return c.addPiece(ctx, client, recordKeeper, extraData, dataSetID, d.Data, d.Products.RetrievalV1) +} + +func (c *Client) AddPieceWithPutStreaming(ctx context.Context, client, recordKeeper string, extraData []byte, dataSetID *uint64, car, raw, aggregate, index, withCDN bool) (ulid.ULID, error) { + if car && raw && aggregate || car && raw || car && aggregate || raw && aggregate { + return ulid.ULID{}, xerrors.Errorf("only one data format is supported") + } + + if !car && (index || withCDN) { + return ulid.ULID{}, xerrors.Errorf("only car data format supports IPFS style CDN retrievals") + } + + ret := &mk20.RetrievalV1{ + Indexing: index, + AnnouncePiece: true, + AnnouncePayload: withCDN, + } + + return c.addPiece(ctx, client, recordKeeper, extraData, dataSetID, nil, ret) +} + +func (c *Client) DealStatus(ctx context.Context, dealID string) (*mk20.DealProductStatusResponse, error) { + id, err := ulid.Parse(dealID) + if err != nil { + return nil, xerrors.Errorf("parsing deal id: %w", err) + } + + status, rerr := c.http.Status(ctx, id) + if rerr.Error != nil { + return nil, rerr.Error + } + if rerr.Status != 200 { + return nil, rerr.HError() + } + + return status, nil +} + +func (c *Client) DealUpdate(ctx context.Context, dealID string, deal *mk20.Deal) error { + id, err := ulid.Parse(dealID) + if err != nil { + return xerrors.Errorf("parsing deal id: %w", err) + } + rerr := c.http.Update(ctx, id, deal) + if rerr.Error != nil { + return rerr.Error + } + if rerr.Status != 200 { + return rerr.HError() + } + return nil +} + +func (c *Client) DealUploadSerial(ctx context.Context, dealID string, r io.Reader) error { + id, err := ulid.Parse(dealID) + if err != nil { + return xerrors.Errorf("parsing deal id: %w", err) + } + rerr := c.http.UploadSerial(ctx, id, r) + if rerr.Error != nil { + return rerr.Error + } + if rerr.Status != 200 { + return rerr.HError() + } + return nil +} + +func (c *Client) DealUploadSerialFinalize(ctx context.Context, dealID string, deal *mk20.Deal) error { + id, err := ulid.Parse(dealID) + if err != nil { + return xerrors.Errorf("parsing deal id: %w", err) + } + rerr := c.http.UploadSerialFinalize(ctx, id, deal) + if rerr.Error != nil { + return rerr.Error + } + if rerr.Status != 200 { + return rerr.HError() + } + return nil +} + +func (c *Client) DealChunkUploadInit(ctx context.Context, dealID string, fileSize, chunkSize int64) error { + id, err := ulid.Parse(dealID) + if err != nil { + return xerrors.Errorf("parsing deal id: %w", err) + } + metadata := &mk20.StartUpload{ + RawSize: uint64(fileSize), + ChunkSize: chunkSize, + } + rerr := c.http.UploadInit(ctx, id, metadata) + if rerr.Error != nil { + return rerr.Error + } + if rerr.Status != 200 { + return rerr.HError() + } + return nil +} + +func (c *Client) DealChunkUpload(ctx context.Context, dealID string, chunk int, r io.Reader) error { + id, err := ulid.Parse(dealID) + if err != nil { + return xerrors.Errorf("parsing deal id: %w", err) + } + rerr := c.http.UploadChunk(ctx, id, chunk, r) + if rerr.Error != nil { + return rerr.Error + } + if rerr.Status != 200 { + return rerr.HError() + } + return nil +} + +func (c *Client) DealChunkUploadFinalize(ctx context.Context, dealID string, deal *mk20.Deal) error { + id, err := ulid.Parse(dealID) + if err != nil { + return xerrors.Errorf("parsing deal id: %w", err) + } + rerr := c.http.UploadSerialFinalize(ctx, id, deal) + if rerr.Error != nil { + return rerr.Error + } + if rerr.Status != 200 { + return rerr.HError() + } + return nil +} + +func (c *Client) DealChunkedUpload(ctx context.Context, dealID string, size, chunkSize int64, r io.ReaderAt) error { + id, err := ulid.Parse(dealID) + if err != nil { + return xerrors.Errorf("parsing deal id: %w", err) + } + metadata := &mk20.StartUpload{ + RawSize: uint64(size), + ChunkSize: chunkSize, + } + + _, rerr := c.http.UploadStatus(ctx, id) + if rerr.Error != nil { + return rerr.Error + } + + if rerr.Status != 200 && rerr.Status != int(mk20.UploadStatusCodeUploadNotStarted) { + return rerr.HError() + } + + if rerr.Status == int(mk20.UploadStatusCodeUploadNotStarted) { + // Start the upload + rerr = c.http.UploadInit(ctx, id, metadata) + if rerr.Error != nil { + return rerr.Error + } + if rerr.Status != 200 { + return rerr.HError() + } + } + + numChunks := int((size + chunkSize - 1) / chunkSize) + + for { + status, rerr := c.http.UploadStatus(ctx, id) + if rerr.Error != nil { + return rerr.Error + } + if rerr.Status != 200 { + return rerr.HError() + } + + log.Debugw("upload status", "status", status) + + if status.TotalChunks != numChunks { + return xerrors.Errorf("expected %d chunks, got %d", numChunks, status.TotalChunks) + } + + if status.Missing == 0 { + break + } + + log.Warnw("missing chunks", "missing", status.Missing) + // Try to upload missing chunks + for _, chunk := range status.MissingChunks { + start := int64(chunk-1) * chunkSize + end := start + chunkSize + if end > size { + end = size + } + log.Debugw("uploading chunk", "start", start, "end", end) + buf := make([]byte, end-start) + _, err := r.ReadAt(buf, start) + if err != nil { + return xerrors.Errorf("failed to read chunk: %w", err) + } + + rerr = c.http.UploadChunk(ctx, id, chunk, bytes.NewReader(buf)) + if rerr.Error != nil { + return rerr.Error + } + if rerr.Status != 200 { + return rerr.HError() + } + } + } + + log.Infow("upload complete") + + rerr = c.http.UploadFinalize(ctx, id, nil) + if rerr.Error != nil { + return rerr.Error + } + if rerr.Status != 200 { + return rerr.HError() + } + return nil +} + +func KeyFromClientAddress(clientAddress address.Address) (key string) { + switch clientAddress.Protocol() { + case address.BLS: + return "bls" + case address.SECP256K1: + return "secp256k1" + case address.Delegated: + return "delegated" + default: + return "" + } +} + +type ClientAuth struct { + client address.Address + wallet *wallet.LocalWallet +} + +func (c *ClientAuth) Sign(digest []byte) ([]byte, error) { + sign, err := c.wallet.WalletSign(context.Background(), c.client, digest, lapi.MsgMeta{Type: lapi.MTDealProposal}) + if err != nil { + return nil, err + } + + return sign.MarshalBinary() +} + +func (c *ClientAuth) PublicKeyBytes() []byte { + return c.client.Bytes() +} + +func (c *ClientAuth) Type() string { + return KeyFromClientAddress(c.client) +} + +var _ Signer = &ClientAuth{} + +func NewAuth(client address.Address, wallet *wallet.LocalWallet) Signer { + return &ClientAuth{ + client: client, + wallet: wallet, + } +} diff --git a/market/mk20/client/http_client.go b/market/mk20/client/http_client.go new file mode 100644 index 000000000..ad8a35ff4 --- /dev/null +++ b/market/mk20/client/http_client.go @@ -0,0 +1,237 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "path" + "strconv" + "time" + + "github.com/oklog/ulid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/market/mk20" +) + +const MarketPath = "/market/mk20" + +// HTTPClient is a thin wrapper around Curio Market 2.0 REST endpoints. +type HTTPClient struct { + BaseURL string + HTTP *http.Client + AuthHeader func(context.Context) (key string, value string, err error) +} + +// NewHTTPClient returns a HTTPClient with sane defaults. +func NewHTTPClient(baseURL string, opts ...Option) *HTTPClient { + c := &HTTPClient{ + BaseURL: baseURL + MarketPath, + HTTP: &http.Client{Timeout: 60 * time.Second}, + } + for _, o := range opts { + o(c) + } + return c +} + +// --- options --------------------------------------------------------------- + +type Option func(*HTTPClient) + +func WithAuth(h func(context.Context) (string, string, error)) Option { + return func(c *HTTPClient) { c.AuthHeader = h } +} + +// --- low‑level helper ------------------------------------------------------ + +func (c *HTTPClient) do(ctx context.Context, method, p string, body io.Reader, data bool, v any) *Error { + req, err := http.NewRequestWithContext(ctx, method, c.BaseURL+path.Clean("/"+p), body) + if err != nil { + return &Error{ + Status: 0, + Error: err, + } + } + + if c.AuthHeader == nil { + return &Error{ + Status: 0, + Error: xerrors.Errorf("auth header is required"), + } + } + + k, vHdr, err := c.AuthHeader(ctx) + if err != nil { + return &Error{ + Status: 0, + Error: err, + } + } + req.Header.Set(k, vHdr) + + if body != nil { + if !data { + req.Header.Set("Content-Type", "application/json") + } else { + req.Header.Set("Content-Type", "application/octet-stream") + } + } + + resp, err := c.HTTP.Do(req) + if err != nil { + return &Error{ + Status: 0, + Error: err, + } + } + defer func() { + _ = resp.Body.Close() + }() + + if resp.StatusCode != 200 { + msg, err := io.ReadAll(resp.Body) + if err != nil { + return &Error{Status: resp.StatusCode, Error: err} + } + return &Error{Status: resp.StatusCode, Message: string(msg)} + } + + if v != nil { + err = json.NewDecoder(resp.Body).Decode(v) + if err != nil { + return &Error{Status: resp.StatusCode, Error: err} + } + } + return &Error{Status: resp.StatusCode} +} + +// Error wraps non‑2xx responses. +type Error struct { + Status int + Message string + Error error +} + +func (e *Error) HError() error { + return xerrors.Errorf("%s", fmt.Sprintf("curio: %d – %s", e.Status, e.Message)) +} + +// --- public methods (one per path) ---------------------------------------- + +// /contracts +func (c *HTTPClient) Contracts(ctx context.Context) ([]string, *Error) { + var out []string + err := c.do(ctx, http.MethodGet, "/contracts", nil, false, &out) + return out, err +} + +// /products +func (c *HTTPClient) Products(ctx context.Context) ([]string, *Error) { + var out []string + err := c.do(ctx, http.MethodGet, "/products", nil, false, &out) + return out, err +} + +// /sources +func (c *HTTPClient) Sources(ctx context.Context) ([]string, *Error) { + var out []string + err := c.do(ctx, http.MethodGet, "/sources", nil, false, &out) + return out, err +} + +// /status/{id} +func (c *HTTPClient) Status(ctx context.Context, id ulid.ULID) (*mk20.DealProductStatusResponse, *Error) { + var out mk20.DealProductStatusResponse + err := c.do(ctx, http.MethodGet, "/status/"+id.String(), nil, false, &out) + return &out, err +} + +// /store (POST) +func (c *HTTPClient) Store(ctx context.Context, deal *mk20.Deal) *Error { + b, merr := json.Marshal(deal) + if merr != nil { + return &Error{Status: 0, Error: xerrors.Errorf("failed to marshal deal: %w", merr)} + } + err := c.do(ctx, http.MethodPost, "/store", bytes.NewReader(b), false, nil) + return err +} + +// /update/{id} (GET in spec – unusual, but honoured) +func (c *HTTPClient) Update(ctx context.Context, id ulid.ULID, deal *mk20.Deal) *Error { + b, merr := json.Marshal(deal) + if merr != nil { + return &Error{Status: 0, Error: xerrors.Errorf("failed to marshal deal: %w", merr)} + } + err := c.do(ctx, http.MethodGet, "/update/"+id.String(), bytes.NewReader(b), false, nil) + return err +} + +// Serial upload (small files) – PUT /upload/{id} +func (c *HTTPClient) UploadSerial(ctx context.Context, id ulid.ULID, r io.Reader) *Error { + err := c.do(ctx, http.MethodPut, "/upload/"+id.String(), r, true, nil) + return err +} + +// Finalize serial upload – POST /upload/{id} +func (c *HTTPClient) UploadSerialFinalize(ctx context.Context, id ulid.ULID, deal *mk20.Deal) *Error { + var err *Error + if deal != nil { + b, merr := json.Marshal(deal) + if merr != nil { + return &Error{Status: 0, Error: xerrors.Errorf("failed to marshal deal: %w", merr)} + } + err = c.do(ctx, http.MethodPost, "/upload/"+id.String(), bytes.NewReader(b), false, nil) + } else { + err = c.do(ctx, http.MethodPost, "/upload/"+id.String(), nil, false, nil) + } + + return err +} + +// Chunked upload workflow --------------------------------------------------- + +// POST /uploads/{id} +func (c *HTTPClient) UploadInit(ctx context.Context, id ulid.ULID, metadata *mk20.StartUpload) *Error { + if metadata == nil { + return &Error{Status: 0, Error: xerrors.Errorf("metadata is required")} + } + b, merr := json.Marshal(metadata) + if merr != nil { + return &Error{Status: 0, Error: xerrors.Errorf("failed to marshal deal: %w", merr)} + } + err := c.do(ctx, http.MethodPost, "/uploads/"+id.String(), bytes.NewReader(b), false, nil) + return err +} + +// PUT /uploads/{id}/{chunk} +func (c *HTTPClient) UploadChunk(ctx context.Context, id ulid.ULID, chunk int, data io.Reader) *Error { + path := "/uploads/" + id.String() + "/" + strconv.Itoa(chunk) + err := c.do(ctx, http.MethodPut, path, data, true, nil) + return err +} + +// GET /uploads/{id} +func (c *HTTPClient) UploadStatus(ctx context.Context, id ulid.ULID) (*mk20.UploadStatus, *Error) { + var out mk20.UploadStatus + err := c.do(ctx, http.MethodGet, "/uploads/"+id.String(), nil, false, &out) + return &out, err +} + +// POST /uploads/finalize/{id} +func (c *HTTPClient) UploadFinalize(ctx context.Context, id ulid.ULID, deal *mk20.Deal) *Error { + var err *Error + if deal != nil { + b, merr := json.Marshal(deal) + if merr != nil { + return &Error{Status: 0, Error: xerrors.Errorf("failed to marshal deal: %w", merr)} + } + err = c.do(ctx, http.MethodPost, "/uploads/finalize/"+id.String(), bytes.NewReader(b), false, nil) + } else { + err = c.do(ctx, http.MethodPost, "/uploads/finalize/"+id.String(), nil, false, nil) + } + return err +} diff --git a/market/mk20/ddo_v1.go b/market/mk20/ddo_v1.go new file mode 100644 index 000000000..65b9893ee --- /dev/null +++ b/market/mk20/ddo_v1.go @@ -0,0 +1,191 @@ +package mk20 + +import ( + "context" + "crypto/rand" + "errors" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum" + eabi "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/samber/lo" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v16/verifreg" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" +) + +var ErrUnknowContract = errors.New("provider does not work with this market") + +// DDOV1 defines a structure for handling provider, client, and piece manager information with associated contract and notification details +// for a DDO deal handling. +type DDOV1 struct { + + // Provider specifies the address of the provider + Provider address.Address `json:"provider"` + + // Actor providing AuthorizeMessage (like f1/f3 wallet) able to authorize actions such as managing ACLs + PieceManager address.Address `json:"piece_manager"` + + // Duration represents the deal duration in epochs. This value is ignored for the deal with allocationID. + // It must be at least 518400 + Duration abi.ChainEpoch `json:"duration"` + + // AllocationId represents an allocation identifier for the deal. + AllocationId *verifreg.AllocationId `json:"allocation_id,omitempty"` + + // ContractAddress specifies the address of the contract governing the deal + ContractAddress string `json:"contract_address"` + + // ContractDealIDMethod specifies the method name to verify the deal and retrieve the deal ID for a contract + ContractVerifyMethod string `json:"contract_verify_method"` + + // ContractDealIDMethodParams represents encoded parameters for the contract verify method if required by the contract + ContractVerifyMethodParams []byte `json:"contract_verify_method_Params,omitempty"` + + // NotificationAddress specifies the address to which notifications will be relayed to when sector is activated + NotificationAddress string `json:"notification_address"` + + // NotificationPayload holds the notification data typically in a serialized byte array format. + NotificationPayload []byte `json:"notification_payload,omitempty"` +} + +func (d *DDOV1) Validate(db *harmonydb.DB, cfg *config.MK20Config) (DealCode, error) { + code, err := IsProductEnabled(db, d.ProductName()) + if err != nil { + return code, err + } + + if d.Provider == address.Undef || d.Provider.Empty() { + return ErrProductValidationFailed, xerrors.Errorf("provider address is not set") + } + + var mk20disabledMiners []address.Address + for _, m := range cfg.DisabledMiners { + maddr, err := address.NewFromString(m) + if err != nil { + return ErrServerInternalError, xerrors.Errorf("failed to parse miner string: %s", err) + } + mk20disabledMiners = append(mk20disabledMiners, maddr) + } + + if lo.Contains(mk20disabledMiners, d.Provider) { + return ErrProductValidationFailed, xerrors.Errorf("provider is disabled") + } + + if d.PieceManager == address.Undef || d.PieceManager.Empty() { + return ErrProductValidationFailed, xerrors.Errorf("piece manager address is not set") + } + + if d.AllocationId != nil { + if *d.AllocationId == verifreg.NoAllocationID { + return ErrProductValidationFailed, xerrors.Errorf("incorrect allocation id") + } + } + + if d.AllocationId == nil { + if d.Duration < 518400 { + return ErrDurationTooShort, xerrors.Errorf("duration must be at least 518400") + } + } + + if d.ContractAddress == "" { + return ErrProductValidationFailed, xerrors.Errorf("contract address is not set") + } + + if d.ContractAddress[0:2] != "0x" { + return ErrProductValidationFailed, xerrors.Errorf("contract address must start with 0x") + } + + if d.ContractVerifyMethodParams == nil { + return ErrProductValidationFailed, xerrors.Errorf("contract verify method params is not set") + } + + if d.ContractVerifyMethod == "" { + return ErrProductValidationFailed, xerrors.Errorf("contract verify method is not set") + } + + return Ok, nil +} + +func (d *DDOV1) GetDealID(ctx context.Context, db *harmonydb.DB, eth *ethclient.Client) (int64, DealCode, error) { + if d.ContractAddress == "0xtest" { + v, err := rand.Int(rand.Reader, big.NewInt(10000000)) + if err != nil { + return -1, ErrServerInternalError, xerrors.Errorf("failed to generate random number: %w", err) + } + return v.Int64(), Ok, nil + } + + var abiStr string + err := db.QueryRow(ctx, `SELECT abi FROM ddo_contracts WHERE address = $1`, d.ContractAddress).Scan(&abiStr) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return -1, ErrMarketNotEnabled, ErrUnknowContract + } + return -1, ErrServerInternalError, xerrors.Errorf("getting abi: %w", err) + } + + parsedABI, err := eabi.JSON(strings.NewReader(abiStr)) + if err != nil { + return -1, ErrServerInternalError, xerrors.Errorf("parsing abi: %w", err) + } + + to := common.HexToAddress(d.ContractAddress) + + // Get the method + method, exists := parsedABI.Methods[d.ContractVerifyMethod] + if !exists { + return -1, ErrServerInternalError, fmt.Errorf("method %s not found in ABI", d.ContractVerifyMethod) + } + + // Enforce method must take exactly one `bytes` parameter + if len(method.Inputs) != 1 || method.Inputs[0].Type.String() != "bytes" { + return -1, ErrServerInternalError, fmt.Errorf("method %q must take exactly one argument of type bytes", method.Name) + } + + // ABI-encode method call with input + callData, err := parsedABI.Pack(method.Name, d.ContractVerifyMethod) + if err != nil { + return -1, ErrServerInternalError, fmt.Errorf("failed to encode call data: %w", err) + } + + // Build call message + msg := ethereum.CallMsg{ + To: &to, + Data: callData, + } + + // Call contract + output, err := eth.CallContract(ctx, msg, nil) + if err != nil { + return -1, ErrServerInternalError, fmt.Errorf("eth_call failed: %w", err) + } + + // Decode return value (assume string) + var result int64 + if err := parsedABI.UnpackIntoInterface(&result, method.Name, output); err != nil { + return -1, ErrServerInternalError, fmt.Errorf("decode result: %w", err) + } + + if result == 0 { + return -1, ErrDealRejectedByMarket, fmt.Errorf("empty result from contract") + } + + return result, Ok, nil +} + +func (d *DDOV1) ProductName() ProductName { + return ProductNameDDOV1 +} + +var _ product = &DDOV1{} diff --git a/market/mk20/http/docs.go b/market/mk20/http/docs.go new file mode 100644 index 000000000..12dea1c47 --- /dev/null +++ b/market/mk20/http/docs.go @@ -0,0 +1,1475 @@ +// Package http Code generated by swaggo/swag. DO NOT EDIT +package http + +import "github.com/swaggo/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "contact": {}, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": { + "/contracts": { + "get": { + "description": "List of supported DDO contracts", + "summary": "List of supported DDO contracts", + "responses": { + "200": { + "description": "Array of contract addresses supported by a system or application.", + "schema": { + "$ref": "#/definitions/mk20.SupportedContracts" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, + "/info/": { + "get": { + "description": "- OpenAPI spec UI for the Market 2.0 APIs", + "summary": "OpenAPI Spec UI", + "responses": {} + } + }, + "/info/swagger.json": { + "get": { + "description": "- OpenAPI spec for the Market 2.0 APIs in JSON format", + "summary": "OpenAPI Spec JSON", + "responses": {} + } + }, + "/info/swagger.yaml": { + "get": { + "description": "- OpenAPI spec for the Market 2.0 APIs in YAML format", + "summary": "OpenAPI Spec YAML", + "responses": {} + } + }, + "/products": { + "get": { + "description": "List of supported products", + "summary": "List of supported products", + "responses": { + "200": { + "description": "Array of products supported by the SP", + "schema": { + "$ref": "#/definitions/mk20.SupportedProducts" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, + "/sources": { + "get": { + "description": "List of supported data sources", + "summary": "List of supported data sources", + "responses": { + "200": { + "description": "Array of dats sources supported by the SP", + "schema": { + "$ref": "#/definitions/mk20.SupportedDataSources" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, + "/status/{id}": { + "get": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Current status of MK20 deal per product", + "summary": "Status of the MK20 deal", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "the status response for deal products with their respective deal statuses", + "schema": { + "$ref": "#/definitions/mk20.DealProductStatusResponse" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, + "/store": { + "post": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Make a mk20 deal", + "consumes": [ + "application/json" + ], + "summary": "Make a mk20 deal", + "parameters": [ + { + "description": "mk20.Deal in json format", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/mk20.Deal" + } + } + ], + "responses": { + "200": { + "description": "Ok represents a successful operation with an HTTP status code of 200", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "422": { + "description": "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "423": { + "description": "ErrUnsupportedProduct indicates that the requested product is not supported by the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "424": { + "description": "ErrProductNotEnabled indicates that the requested product is not enabled on the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "425": { + "description": "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "426": { + "description": "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "429": { + "description": "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "430": { + "description": "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "440": { + "description": "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "441": { + "description": "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "500": { + "description": "ErrServerInternalError indicates an internal server error with a corresponding error code of 500", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "503": { + "description": "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + } + } + } + }, + "/update/{id}": { + "get": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Useful for adding adding additional products and updating PoRep duration", + "consumes": [ + "application/json" + ], + "summary": "Update the deal details of existing deals.", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "mk20.Deal in json format", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/mk20.Deal" + } + } + ], + "responses": { + "200": { + "description": "Ok represents a successful operation with an HTTP status code of 200", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "422": { + "description": "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "423": { + "description": "ErrUnsupportedProduct indicates that the requested product is not supported by the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "424": { + "description": "ErrProductNotEnabled indicates that the requested product is not enabled on the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "425": { + "description": "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "426": { + "description": "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "429": { + "description": "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "430": { + "description": "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "440": { + "description": "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "441": { + "description": "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "500": { + "description": "ErrServerInternalError indicates an internal server error with a corresponding error code of 500", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "503": { + "description": "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + } + } + } + }, + "/upload/{id}": { + "put": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Allows uploading data for deals in a single stream. Suitable for small deals.", + "summary": "Upload the deal data", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "raw binary", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "integer" + } + } + } + ], + "responses": { + "200": { + "description": "UploadOk indicates a successful upload operation, represented by the HTTP status code 200", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "UploadStartCodeDealNotFound represents a 404 status indicating the deal was not found during the upload start process", + "schema": { + "$ref": "#/definitions/mk20.UploadStartCode" + } + }, + "500": { + "description": "UploadServerError indicates a server-side error occurred during the upload process, represented by the HTTP status code 500", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + } + } + }, + "post": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Finalizes the serial upload process once data has been uploaded", + "consumes": [ + "application/json" + ], + "summary": "Finalizes the serial upload process", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "mk20.deal in json format", + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/mk20.Deal" + } + } + ], + "responses": { + "200": { + "description": "Ok represents a successful operation with an HTTP status code of 200", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "422": { + "description": "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "423": { + "description": "ErrUnsupportedProduct indicates that the requested product is not supported by the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "424": { + "description": "ErrProductNotEnabled indicates that the requested product is not enabled on the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "425": { + "description": "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "426": { + "description": "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "429": { + "description": "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "430": { + "description": "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "440": { + "description": "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "441": { + "description": "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "500": { + "description": "ErrServerInternalError indicates an internal server error with a corresponding error code of 500", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "503": { + "description": "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + } + } + } + }, + "/uploads/finalize/{id}": { + "post": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Finalizes the upload process once all the chunks are uploaded.", + "consumes": [ + "application/json" + ], + "summary": "Finalizes the upload process", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "mk20.deal in json format", + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/mk20.Deal" + } + } + ], + "responses": { + "200": { + "description": "Ok represents a successful operation with an HTTP status code of 200", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "422": { + "description": "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "423": { + "description": "ErrUnsupportedProduct indicates that the requested product is not supported by the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "424": { + "description": "ErrProductNotEnabled indicates that the requested product is not enabled on the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "425": { + "description": "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "426": { + "description": "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "429": { + "description": "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "430": { + "description": "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "440": { + "description": "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "441": { + "description": "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "500": { + "description": "ErrServerInternalError indicates an internal server error with a corresponding error code of 500", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "503": { + "description": "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + } + } + } + }, + "/uploads/{id}": { + "get": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Return a json struct detailing the current status of a deal upload.", + "summary": "Status of deal upload", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "The status of a file upload process, including progress and missing chunks", + "schema": { + "$ref": "#/definitions/mk20.UploadStatus" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "UploadStatusCodeDealNotFound indicates that the requested deal was not found, corresponding to status code 404", + "schema": { + "$ref": "#/definitions/mk20.UploadStatusCode" + } + }, + "425": { + "description": "UploadStatusCodeUploadNotStarted indicates that the upload process has not started yet", + "schema": { + "$ref": "#/definitions/mk20.UploadStatusCode" + } + }, + "500": { + "description": "UploadStatusCodeServerError indicates an internal server error occurred during the upload process, corresponding to status code 500", + "schema": { + "$ref": "#/definitions/mk20.UploadStatusCode" + } + } + } + }, + "post": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Initializes the upload for a deal. Each upload must be initialized before chunks can be uploaded for a deal.", + "consumes": [ + "application/json" + ], + "summary": "Starts the upload process", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Metadata for initiating an upload operation", + "name": "data", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/mk20.StartUpload" + } + } + ], + "responses": { + "200": { + "description": "UploadStartCodeOk indicates a successful upload start request with status code 200", + "schema": { + "$ref": "#/definitions/mk20.UploadStartCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "UploadStartCodeDealNotFound represents a 404 status indicating the deal was not found during the upload start process", + "schema": { + "$ref": "#/definitions/mk20.UploadStartCode" + } + }, + "409": { + "description": "UploadStartCodeAlreadyStarted indicates that the upload process has already been initiated and cannot be started again", + "schema": { + "$ref": "#/definitions/mk20.UploadStartCode" + } + }, + "500": { + "description": "UploadStartCodeServerError indicates an error occurred on the server while processing an upload start request", + "schema": { + "$ref": "#/definitions/mk20.UploadStartCode" + } + } + } + } + }, + "/uploads/{id}/{chunkNum}": { + "put": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Allows uploading chunks for a deal file. Method can be called in parallel to speed up uploads.", + "summary": "Upload a file chunk", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "chunkNum", + "name": "chunkNum", + "in": "path", + "required": true + }, + { + "description": "raw binary", + "name": "data", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "integer" + } + } + } + ], + "responses": { + "200": { + "description": "UploadOk indicates a successful upload operation, represented by the HTTP status code 200", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "UploadNotFound represents an error where the requested upload chunk could not be found, typically corresponding to HTTP status 404", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + }, + "409": { + "description": "UploadChunkAlreadyUploaded indicates that the chunk has already been uploaded and cannot be re-uploaded", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + }, + "500": { + "description": "UploadServerError indicates a server-side error occurred during the upload process, represented by the HTTP status code 500", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + } + } + } + } + }, + "definitions": { + "address.Address": { + "type": "object" + }, + "github_com_filecoin-project_go-state-types_builtin_v16_verifreg.AllocationId": { + "type": "integer", + "format": "int64", + "enum": [ + 0 + ], + "x-enum-varnames": [ + "NoAllocationID" + ] + }, + "http.Header": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "mk20.AggregateType": { + "type": "integer", + "enum": [ + 0, + 1 + ], + "x-enum-varnames": [ + "AggregateTypeNone", + "AggregateTypeV1" + ] + }, + "mk20.DDOV1": { + "type": "object", + "properties": { + "allocation_id": { + "description": "AllocationId represents an allocation identifier for the deal.", + "allOf": [ + { + "$ref": "#/definitions/github_com_filecoin-project_go-state-types_builtin_v16_verifreg.AllocationId" + } + ] + }, + "contract_address": { + "description": "ContractAddress specifies the address of the contract governing the deal", + "type": "string" + }, + "contract_verify_method": { + "description": "ContractDealIDMethod specifies the method name to verify the deal and retrieve the deal ID for a contract", + "type": "string" + }, + "contract_verify_method_Params": { + "description": "ContractDealIDMethodParams represents encoded parameters for the contract verify method if required by the contract", + "type": "array", + "items": { + "type": "integer" + } + }, + "duration": { + "description": "Duration represents the deal duration in epochs. This value is ignored for the deal with allocationID.\nIt must be at least 518400", + "type": "integer" + }, + "notification_address": { + "description": "NotificationAddress specifies the address to which notifications will be relayed to when sector is activated", + "type": "string" + }, + "notification_payload": { + "description": "NotificationPayload holds the notification data typically in a serialized byte array format.", + "type": "array", + "items": { + "type": "integer" + } + }, + "piece_manager": { + "description": "Actor providing AuthorizeMessage (like f1/f3 wallet) able to authorize actions such as managing ACLs", + "allOf": [ + { + "$ref": "#/definitions/address.Address" + } + ] + }, + "provider": { + "description": "Provider specifies the address of the provider", + "allOf": [ + { + "$ref": "#/definitions/address.Address" + } + ] + } + } + }, + "mk20.DataSource": { + "type": "object", + "properties": { + "format": { + "description": "Format defines the format of the piece data, which can include CAR, Aggregate, or Raw formats.", + "allOf": [ + { + "$ref": "#/definitions/mk20.PieceDataFormat" + } + ] + }, + "piece_cid": { + "description": "PieceCID represents the unique identifier (pieceCID V2) for a piece of data, stored as a CID object.", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "example": { + "/": "bafkzcibfxx3meais3xzh6qn56y6hiasmrufhegoweu3o5ccofs74nfdfr4yn76pqz4pq" + } + }, + "source_aggregate": { + "description": "SourceAggregate represents an aggregated source, comprising multiple data sources as pieces.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DataSourceAggregate" + } + ] + }, + "source_http": { + "description": "SourceHTTP represents the HTTP-based source of piece data within a deal, including raw size and URLs for retrieval.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DataSourceHTTP" + } + ] + }, + "source_http_put": { + "description": "SourceHttpPut allow clients to push piece data after deal is accepted", + "allOf": [ + { + "$ref": "#/definitions/mk20.DataSourceHttpPut" + } + ] + }, + "source_offline": { + "description": "SourceOffline defines the data source for offline pieces, including raw size information.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DataSourceOffline" + } + ] + } + } + }, + "mk20.DataSourceAggregate": { + "type": "object", + "properties": { + "pieces": { + "type": "array", + "items": { + "$ref": "#/definitions/mk20.DataSource" + } + } + } + }, + "mk20.DataSourceHTTP": { + "type": "object", + "properties": { + "urls": { + "description": "URLs lists the HTTP endpoints where the piece data can be fetched.", + "type": "array", + "items": { + "$ref": "#/definitions/mk20.HttpUrl" + } + } + } + }, + "mk20.DataSourceHttpPut": { + "type": "object" + }, + "mk20.DataSourceOffline": { + "type": "object" + }, + "mk20.Deal": { + "type": "object", + "properties": { + "client": { + "description": "Client wallet string for the deal", + "type": "string" + }, + "data": { + "description": "Data represents the source of piece data and associated metadata.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DataSource" + } + ] + }, + "identifier": { + "description": "Identifier represents a unique identifier for the deal in ULID format.", + "type": "string", + "format": "ulid", + "example": "01ARZ3NDEKTSV4RRFFQ69G5FAV" + }, + "products": { + "description": "Products represents a collection of product-specific information associated with a deal", + "allOf": [ + { + "$ref": "#/definitions/mk20.Products" + } + ] + } + } + }, + "mk20.DealCode": { + "type": "integer", + "enum": [ + 200, + 401, + 400, + 404, + 430, + 422, + 423, + 424, + 425, + 426, + 500, + 503, + 429, + 440, + 441 + ], + "x-enum-varnames": [ + "Ok", + "ErrUnAuthorized", + "ErrBadProposal", + "ErrDealNotFound", + "ErrMalformedDataSource", + "ErrUnsupportedDataSource", + "ErrUnsupportedProduct", + "ErrProductNotEnabled", + "ErrProductValidationFailed", + "ErrDealRejectedByMarket", + "ErrServerInternalError", + "ErrServiceMaintenance", + "ErrServiceOverloaded", + "ErrMarketNotEnabled", + "ErrDurationTooShort" + ] + }, + "mk20.DealProductStatusResponse": { + "type": "object", + "properties": { + "ddo_v1": { + "description": "DDOV1 holds the DealStatusResponse for product \"ddo_v1\".", + "allOf": [ + { + "$ref": "#/definitions/mk20.DealStatusResponse" + } + ] + }, + "pdp_v1": { + "description": "PDPV1 represents the DealStatusResponse for the product pdp_v1.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DealStatusResponse" + } + ] + } + } + }, + "mk20.DealState": { + "type": "string", + "enum": [ + "accepted", + "uploading", + "processing", + "sealing", + "indexing", + "failed", + "complete" + ], + "x-enum-varnames": [ + "DealStateAccepted", + "DealStateAwaitingUpload", + "DealStateProcessing", + "DealStateSealing", + "DealStateIndexing", + "DealStateFailed", + "DealStateComplete" + ] + }, + "mk20.DealStatusResponse": { + "type": "object", + "properties": { + "errorMsg": { + "description": "ErrorMsg is an optional field containing error details associated with the deal's current state if an error occurred.", + "type": "string" + }, + "status": { + "description": "State indicates the current processing state of the deal as a DealState value.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DealState" + } + ] + } + } + }, + "mk20.FormatAggregate": { + "type": "object", + "properties": { + "sub": { + "description": "Sub holds a slice of DataSource, representing details of sub pieces aggregated under this format.\nThe order must be same as segment index to avoid incorrect indexing of sub pieces in an aggregate", + "type": "array", + "items": { + "$ref": "#/definitions/mk20.DataSource" + } + }, + "type": { + "description": "Type specifies the type of aggregation for data pieces, represented by an AggregateType value.", + "allOf": [ + { + "$ref": "#/definitions/mk20.AggregateType" + } + ] + } + } + }, + "mk20.FormatBytes": { + "type": "object" + }, + "mk20.FormatCar": { + "type": "object" + }, + "mk20.HttpUrl": { + "type": "object", + "properties": { + "fallback": { + "description": "Fallback indicates whether this URL serves as a fallback option when other URLs fail.", + "type": "boolean" + }, + "headers": { + "description": "HTTPHeaders represents the HTTP headers associated with the URL.", + "allOf": [ + { + "$ref": "#/definitions/http.Header" + } + ] + }, + "priority": { + "description": "Priority indicates the order preference for using the URL in requests, with lower values having higher priority.", + "type": "integer" + }, + "url": { + "description": "URL specifies the HTTP endpoint where the piece data can be fetched.", + "type": "string" + } + } + }, + "mk20.PDPV1": { + "type": "object", + "properties": { + "add_piece": { + "description": "AddPiece indicated that this deal is meant to add Piece to a given DataSet. DataSetID must be defined.", + "type": "boolean" + }, + "create_data_set": { + "description": "CreateDataSet indicated that this deal is meant to create a new DataSet for the client by storage provider.", + "type": "boolean" + }, + "data_set_id": { + "description": "DataSetID is PDP verified contract dataset ID. It must be defined for all deals except when CreateDataSet is true.", + "type": "integer" + }, + "delete_data_set": { + "description": "DeleteDataSet indicated that this deal is meant to delete an existing DataSet created by SP for the client.\nDataSetID must be defined.", + "type": "boolean" + }, + "delete_piece": { + "description": "DeletePiece indicates whether the Piece of the data should be deleted. DataSetID must be defined.", + "type": "boolean" + }, + "extra_data": { + "description": "ExtraData can be used to send additional information to service contract when Verifier action like AddRoot, DeleteRoot etc. are performed.", + "type": "array", + "items": { + "type": "integer" + } + }, + "piece_ids": { + "description": "PieceIDs is a list of Piece ids in a proof set.", + "type": "array", + "items": { + "type": "integer" + } + }, + "record_keeper": { + "description": "RecordKeeper specifies the record keeper contract address for the new PDP dataset.", + "type": "string" + } + } + }, + "mk20.PieceDataFormat": { + "type": "object", + "properties": { + "aggregate": { + "description": "Aggregate holds a reference to the aggregated format of piece data.", + "allOf": [ + { + "$ref": "#/definitions/mk20.FormatAggregate" + } + ] + }, + "car": { + "description": "Car represents the optional CAR file format.", + "allOf": [ + { + "$ref": "#/definitions/mk20.FormatCar" + } + ] + }, + "raw": { + "description": "Raw represents the raw format of the piece data, encapsulated as bytes.", + "allOf": [ + { + "$ref": "#/definitions/mk20.FormatBytes" + } + ] + } + } + }, + "mk20.Products": { + "type": "object", + "properties": { + "ddo_v1": { + "description": "DDOV1 represents a product v1 configuration for Direct Data Onboarding (DDO)", + "allOf": [ + { + "$ref": "#/definitions/mk20.DDOV1" + } + ] + }, + "pdp_v1": { + "description": "PDPV1 represents product-specific configuration for PDP version 1 deals.", + "allOf": [ + { + "$ref": "#/definitions/mk20.PDPV1" + } + ] + }, + "retrieval_v1": { + "description": "RetrievalV1 represents configuration for retrieval settings in the system, including indexing and announcement flags.", + "allOf": [ + { + "$ref": "#/definitions/mk20.RetrievalV1" + } + ] + } + } + }, + "mk20.RetrievalV1": { + "type": "object", + "properties": { + "announce_payload": { + "description": "AnnouncePayload indicates whether the payload should be announced to IPNI.", + "type": "boolean" + }, + "announce_piece": { + "description": "AnnouncePiece indicates whether the piece information should be announced to IPNI.", + "type": "boolean" + }, + "indexing": { + "description": "Indexing indicates if the deal is to be indexed in the provider's system to support CIDs based retrieval", + "type": "boolean" + } + } + }, + "mk20.StartUpload": { + "type": "object", + "properties": { + "chunk_size": { + "description": "ChunkSize defines the size of each data chunk to be used during the upload process.", + "type": "integer" + }, + "raw_size": { + "description": "RawSize indicates the total size of the data to be uploaded in bytes.", + "type": "integer" + } + } + }, + "mk20.SupportedContracts": { + "type": "object", + "properties": { + "contracts": { + "description": "Contracts represents a list of supported contract addresses in string format.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "mk20.SupportedDataSources": { + "type": "object", + "properties": { + "sources": { + "description": "Contracts represents a list of supported contract addresses in string format.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "mk20.SupportedProducts": { + "type": "object", + "properties": { + "products": { + "description": "Contracts represents a list of supported contract addresses in string format.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "mk20.UploadCode": { + "type": "integer", + "enum": [ + 200, + 400, + 404, + 409, + 500, + 429 + ], + "x-enum-varnames": [ + "UploadOk", + "UploadBadRequest", + "UploadNotFound", + "UploadChunkAlreadyUploaded", + "UploadServerError", + "UploadRateLimit" + ] + }, + "mk20.UploadStartCode": { + "type": "integer", + "enum": [ + 200, + 400, + 404, + 409, + 500 + ], + "x-enum-varnames": [ + "UploadStartCodeOk", + "UploadStartCodeBadRequest", + "UploadStartCodeDealNotFound", + "UploadStartCodeAlreadyStarted", + "UploadStartCodeServerError" + ] + }, + "mk20.UploadStatus": { + "type": "object", + "properties": { + "missing": { + "description": "Missing represents the number of chunks that are not yet uploaded.", + "type": "integer" + }, + "missing_chunks": { + "description": "MissingChunks is a slice containing the indices of missing chunks.", + "type": "array", + "items": { + "type": "integer" + } + }, + "total_chunks": { + "description": "TotalChunks represents the total number of chunks required for the upload.", + "type": "integer" + }, + "uploaded": { + "description": "Uploaded represents the number of chunks successfully uploaded.", + "type": "integer" + }, + "uploaded_chunks": { + "description": "UploadedChunks is a slice containing the indices of successfully uploaded chunks.", + "type": "array", + "items": { + "type": "integer" + } + } + } + }, + "mk20.UploadStatusCode": { + "type": "integer", + "enum": [ + 200, + 404, + 425, + 500 + ], + "x-enum-varnames": [ + "UploadStatusCodeOk", + "UploadStatusCodeDealNotFound", + "UploadStatusCodeUploadNotStarted", + "UploadStatusCodeServerError" + ] + } + }, + "securityDefinitions": { + "CurioAuth": { + "description": "Use the format: ` + "`" + `CurioAuth PublicKeyType:PublicKey:Signature` + "`" + `\n\n- ` + "`" + `PublicKeyType` + "`" + `: String representation of type of wallet (e.g., \"ed25519\", \"bls\", \"secp256k1\")\n- ` + "`" + `PublicKey` + "`" + `: Base64 string of public key bytes\n- ` + "`" + `Signature` + "`" + `: Signature is Base64 string of signature bytes.\n- The client is expected to sign the SHA-256 hash of a message constructed by concatenating the following components, in order.\n- The raw public key bytes (not a human-readable address)\n- The timestamp, truncated to the nearest hour, formatted in RFC3339 (e.g., 2025-07-15T17:00:00Z)\n- These two byte slices are joined without any delimiter between them, and the resulting byte array is then hashed using SHA-256. The signature is performed on that hash.", + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "", + Host: "", + BasePath: "", + Schemes: []string{}, + Title: "Curio Market 2.0 API", + Description: "Curio market APIs", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/market/mk20/http/http.go b/market/mk20/http/http.go new file mode 100644 index 000000000..2551b48d8 --- /dev/null +++ b/market/mk20/http/http.go @@ -0,0 +1,847 @@ +package http + +import ( + "bytes" + "context" + "embed" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "runtime" + "runtime/debug" + "strconv" + "time" + + "github.com/go-chi/chi/v5" + "github.com/go-chi/httprate" + logging "github.com/ipfs/go-log/v2" + "github.com/oklog/ulid" + httpSwagger "github.com/swaggo/http-swagger/v2" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/market/mk20" + storage_market "github.com/filecoin-project/curio/tasks/storage-market" +) + +//go:embed swagger.yaml swagger.json docs.go +var swaggerAssets embed.FS + +var log = logging.Logger("mk20httphdlr") + +const version = "1.0.0" + +const requestTimeout = 10 * time.Second + +type MK20DealHandler struct { + cfg *config.CurioConfig + db *harmonydb.DB // Replace with your actual DB wrapper if different + dm *storage_market.CurioStorageDealMarket + disabledMiners []address.Address +} + +func NewMK20DealHandler(db *harmonydb.DB, cfg *config.CurioConfig, dm *storage_market.CurioStorageDealMarket) (*MK20DealHandler, error) { + var disabledMiners []address.Address + for _, m := range cfg.Market.StorageMarketConfig.MK12.DisabledMiners { + maddr, err := address.NewFromString(m) + if err != nil { + return nil, xerrors.Errorf("failed to parse miner string: %s", err) + } + disabledMiners = append(disabledMiners, maddr) + } + return &MK20DealHandler{db: db, dm: dm, cfg: cfg, disabledMiners: disabledMiners}, nil +} + +func dealRateLimitMiddleware() func(http.Handler) http.Handler { + return httprate.LimitByIP(50, 1*time.Second) +} + +func AuthMiddleware(db *harmonydb.DB, cfg *config.CurioConfig) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + authHeader := r.Header.Get("Authorization") + if authHeader == "" { + http.Error(w, "Missing Authorization header", http.StatusUnauthorized) + return + } + + allowed, client, err := mk20.Auth(authHeader, db, cfg) + if err != nil { + log.Errorw("failed to authenticate request", "err", err) + http.Error(w, "Error during authentication: "+err.Error(), http.StatusInternalServerError) + return + } + + if !allowed { + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + + idStr := chi.URLParam(r, "id") + if idStr != "" { + allowed, err := mk20.AuthenticateClient(db, idStr, client) + if err != nil { + log.Errorw("failed to authenticate client", "err", err) + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + if !allowed { + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + } + + next.ServeHTTP(w, r) + }) + } +} + +// @title Curio Market 2.0 API +// @description Curio market APIs +func Router(mdh *MK20DealHandler, domainName string) http.Handler { + SwaggerInfo.BasePath = "/market/mk20" + SwaggerInfo.Host = domainName + SwaggerInfo.Version = version + SwaggerInfo.Schemes = []string{"https"} + mux := chi.NewRouter() + mux.Use(dealRateLimitMiddleware()) + mux.Mount("/", APIRouter(mdh)) + mux.Mount("/info", InfoRouter()) + return mux +} + +// @securityDefinitions.apikey CurioAuth +// @in header +// @name Authorization +// @description Use the format: `CurioAuth PublicKeyType:PublicKey:Signature` +// @description +// @description - `PublicKeyType`: String representation of type of wallet (e.g., "ed25519", "bls", "secp256k1") +// @description - `PublicKey`: Base64 string of public key bytes +// @description - `Signature`: Signature is Base64 string of signature bytes. +// @description - The client is expected to sign the SHA-256 hash of a message constructed by concatenating the following components, in order. +// @description - The raw public key bytes (not a human-readable address) +// @description - The timestamp, truncated to the nearest hour, formatted in RFC3339 (e.g., 2025-07-15T17:00:00Z) +// @description - These two byte slices are joined without any delimiter between them, and the resulting byte array is then hashed using SHA-256. The signature is performed on that hash. +func APIRouter(mdh *MK20DealHandler) http.Handler { + mux := chi.NewRouter() + mux.Use(dealRateLimitMiddleware()) + mux.Use(AuthMiddleware(mdh.db, mdh.cfg)) + mux.Method("POST", "/store", http.TimeoutHandler(http.HandlerFunc(mdh.mk20deal), requestTimeout, "request timeout")) + mux.Method("GET", "/status/{id}", http.TimeoutHandler(http.HandlerFunc(mdh.mk20status), requestTimeout, "request timeout")) + mux.Method("POST", "/uploads/{id}", http.TimeoutHandler(http.HandlerFunc(mdh.mk20UploadStart), requestTimeout, "request timeout")) + mux.Method("GET", "/uploads/{id}", http.TimeoutHandler(http.HandlerFunc(mdh.mk20UploadStatus), requestTimeout, "request timeout")) + mux.Put("/uploads/{id}/{chunkNum}", mdh.mk20UploadDealChunks) + mux.Method("POST", "/uploads/finalize/{id}", http.TimeoutHandler(http.HandlerFunc(mdh.mk20FinalizeUpload), requestTimeout, "request timeout")) + mux.Method("POST", "/update/{id}", http.TimeoutHandler(http.HandlerFunc(mdh.mk20UpdateDeal), requestTimeout, "request timeout")) + mux.Method("POST", "/upload/{id}", http.TimeoutHandler(http.HandlerFunc(mdh.mk20SerialUploadFinalize), requestTimeout, "request timeout")) + mux.Method("GET", "/products", http.TimeoutHandler(http.HandlerFunc(mdh.supportedProducts), requestTimeout, "request timeout")) + mux.Method("GET", "/sources", http.TimeoutHandler(http.HandlerFunc(mdh.supportedDataSources), requestTimeout, "request timeout")) + mux.Method("GET", "/contracts", http.TimeoutHandler(http.HandlerFunc(mdh.mk20supportedContracts), requestTimeout, "request timeout")) + mux.Put("/upload/{id}", mdh.mk20SerialUpload) + return mux +} + +// InfoRouter serves OpenAPI specs UI +// @name info +// @Summary OpenAPI Spec UI +// @description - OpenAPI spec UI for the Market 2.0 APIs +// @Router /info/ [get] +// @BasePath /market/mk20 +func InfoRouter() http.Handler { + mux := chi.NewRouter() + mux.Get("/*", httpSwagger.Handler()) + mux.Get("/swagger.yaml", swaggerYaml) + mux.Get("/swagger.json", swaggerJson) + return mux +} + +// @name OpenAPI Spec +// @Summary OpenAPI Spec YAML +// @description - OpenAPI spec for the Market 2.0 APIs in YAML format +// @Router /info/swagger.yaml [get] +func swaggerYaml(w http.ResponseWriter, r *http.Request) { + swaggerYAML, err := swaggerAssets.ReadFile("swagger.yaml") + if err != nil { + log.Errorw("failed to read swagger.yaml", "err", err) + http.Error(w, "failed to read swagger.yaml", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/x-yaml") + _, _ = w.Write(swaggerYAML) +} + +// @name OpenAPI Spec +// @Summary OpenAPI Spec JSON +// @description - OpenAPI spec for the Market 2.0 APIs in JSON format +// @Router /info/swagger.json [get] +func swaggerJson(w http.ResponseWriter, r *http.Request) { + swaggerJSON, err := swaggerAssets.ReadFile("swagger.json") + if err != nil { + log.Errorw("failed to read swagger.json", "err", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(swaggerJSON) +} + +// mk20deal handles HTTP requests to process MK20 deals, parses the request body, validates it, and executes the deal logic. +// @Router /store [post] +// @Summary Make a mk20 deal +// @Description Make a mk20 deal +// @BasePath /market/mk20 +// @Param body body mk20.Deal true "mk20.Deal in json format" +// @Accept json +// @Failure 200 {object} mk20.DealCode "Ok represents a successful operation with an HTTP status code of 200" +// @Failure 400 {object} mk20.DealCode "ErrBadProposal represents a validation error that indicates an invalid or malformed proposal input in the context of validation logic" +// @Failure 404 {object} mk20.DealCode "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404" +// @Failure 430 {object} mk20.DealCode "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data" +// @Failure 422 {object} mk20.DealCode "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context" +// @Failure 423 {object} mk20.DealCode "ErrUnsupportedProduct indicates that the requested product is not supported by the provider" +// @Failure 424 {object} mk20.DealCode "ErrProductNotEnabled indicates that the requested product is not enabled on the provider" +// @Failure 425 {object} mk20.DealCode "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data" +// @Failure 426 {object} mk20.DealCode "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules" +// @Failure 500 {object} mk20.DealCode "ErrServerInternalError indicates an internal server error with a corresponding error code of 500" +// @Failure 503 {object} mk20.DealCode "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503" +// @Failure 429 {object} mk20.DealCode "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment" +// @Failure 440 {object} mk20.DealCode "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation" +// @Failure 441 {object} mk20.DealCode "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold" +// @Failure 400 {string} string "Bad Request - Invalid input or validation error" +// @security CurioAuth +func (mdh *MK20DealHandler) mk20deal(w http.ResponseWriter, r *http.Request) { + defer func() { + if r := recover(); r != nil { + trace := make([]byte, 1<<16) + n := runtime.Stack(trace, false) + log.Errorf("panic occurred in mk20deal: %v\n%s", r, trace[:n]) + debug.PrintStack() + } + }() + + ct := r.Header.Get("Content-Type") + var deal mk20.Deal + if ct != "application/json" { + log.Errorf("invalid content type: %s", ct) + http.Error(w, "invalid content type", http.StatusBadRequest) + return + } + + defer func() { + _ = r.Body.Close() + }() + + body, err := io.ReadAll(r.Body) + if err != nil { + log.Errorf("error reading request body: %s", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + log.Infow("received deal proposal", "body", string(body)) + + err = json.Unmarshal(body, &deal) + if err != nil { + log.Errorf("error unmarshaling json: %s", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + authHeader := r.Header.Get("Authorization") + if authHeader == "" { + http.Error(w, "Missing Authorization header", http.StatusUnauthorized) + return + } + + result := mdh.dm.MK20Handler.ExecuteDeal(context.Background(), &deal, authHeader) + + log.Infow("deal processed", + "id", deal.Identifier, + "HTTPCode", result.HTTPCode, + "Reason", result.Reason) + + w.WriteHeader(int(result.HTTPCode)) + _, err = fmt.Fprint(w, "Reason: ", result.Reason) + if err != nil { + log.Errorw("writing deal response:", "id", deal.Identifier, "error", err) + } +} + +// mk20status handles HTTP requests to fetch the status of a deal by its ID and responding with JSON-encoded results. +// @Router /status/{id} [get] +// @Summary Status of the MK20 deal +// @Description Current status of MK20 deal per product +// @BasePath /market/mk20 +// @Param id path string true "id" +// @Failure 200 {object} mk20.DealProductStatusResponse "the status response for deal products with their respective deal statuses" +// @Failure 400 {string} string "Bad Request - Invalid input or validation error" +// @Failure 500 {string} string "Internal Server Error" +// @security CurioAuth +func (mdh *MK20DealHandler) mk20status(w http.ResponseWriter, r *http.Request) { + idStr := chi.URLParam(r, "id") + if idStr == "" { + log.Errorw("missing id in url", "url", r.URL) + http.Error(w, "missing id in url", http.StatusBadRequest) + return + } + id, err := ulid.Parse(idStr) + if err != nil { + log.Errorw("invalid id in url", "id", idStr, "err", err) + http.Error(w, "invalid id in url", http.StatusBadRequest) + return + } + + result := mdh.dm.MK20Handler.DealStatus(context.Background(), id) + + if result.HTTPCode != http.StatusOK { + w.WriteHeader(result.HTTPCode) + return + } + + resp, err := json.Marshal(result.Response) + if err != nil { + log.Errorw("failed to marshal deal status response", "id", idStr, "err", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + _, err = w.Write(resp) + if err != nil { + log.Errorw("failed to write deal status response", "id", idStr, "err", err) + } +} + +// mk20supportedContracts handles HTTP requests to retrieve supported contract addresses and returns them in a JSON response. +// @Router /contracts [get] +// @Summary List of supported DDO contracts +// @Description List of supported DDO contracts +// @BasePath /market/mk20 +// @Failure 200 {object} mk20.SupportedContracts "Array of contract addresses supported by a system or application." +// @Failure 500 {string} string "Internal Server Error" +func (mdh *MK20DealHandler) mk20supportedContracts(w http.ResponseWriter, r *http.Request) { + var contracts mk20.SupportedContracts + err := mdh.db.Select(r.Context(), &contracts, "SELECT address FROM ddo_contracts") + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + log.Errorw("no supported contracts found") + http.Error(w, "no supported contracts found", http.StatusNotFound) + return + } + log.Errorw("failed to get supported contracts", "err", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + // Write a json array + resp, err := json.Marshal(contracts) + if err != nil { + log.Errorw("failed to marshal supported contracts", "err", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + _, err = w.Write(resp) + if err != nil { + log.Errorw("failed to write supported contracts", "err", err) + } +} + +// supportedProducts handles HTTP requests to retrieve a list of supported MK20 products and returns them in a JSON response. +// @Router /products [get] +// @Summary List of supported products +// @Description List of supported products +// @BasePath /market/mk20 +// @Failure 500 {string} string "Internal Server Error" +// @Failure 200 {object} mk20.SupportedProducts "Array of products supported by the SP" +func (mdh *MK20DealHandler) supportedProducts(w http.ResponseWriter, r *http.Request) { + prods, _, err := mdh.dm.MK20Handler.Supported(r.Context()) + if err != nil { + log.Errorw("failed to get supported producers and sources", "err", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + var products mk20.SupportedProducts + for k, v := range prods { + if v { + products.Products = append(products.Products, k) + } + } + resp, err := json.Marshal(products) + if err != nil { + log.Errorw("failed to marshal supported products", "err", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + _, err = w.Write(resp) + if err != nil { + log.Errorw("failed to write supported products", "err", err) + } +} + +// supportedDataSources handles HTTP requests to retrieve the supported data sources in JSON format. +// @Router /sources [get] +// @Summary List of supported data sources +// @Description List of supported data sources +// @BasePath /market/mk20 +// @Failure 500 {string} string "Internal Server Error" +// @Failure 200 {object} mk20.SupportedDataSources "Array of dats sources supported by the SP" +func (mdh *MK20DealHandler) supportedDataSources(w http.ResponseWriter, r *http.Request) { + _, srcs, err := mdh.dm.MK20Handler.Supported(r.Context()) + if err != nil { + log.Errorw("failed to get supported producers and sources", "err", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + var sources mk20.SupportedDataSources + for k, v := range srcs { + if v { + sources.Sources = append(sources.Sources, k) + } + } + resp, err := json.Marshal(sources) + if err != nil { + log.Errorw("failed to marshal supported sources", "err", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + _, err = w.Write(resp) + if err != nil { + log.Errorw("failed to write supported sources", "err", err) + } +} + +// mk20UploadStatus handles the upload status requests for a given id. +// @Router /uploads/{id} [get] +// @Param id path string true "id" +// @Summary Status of deal upload +// @Description Return a json struct detailing the current status of a deal upload. +// @BasePath /market/mk20 +// @Failure 200 {object} mk20.UploadStatus "The status of a file upload process, including progress and missing chunks" +// @Failure 404 {object} mk20.UploadStatusCode "UploadStatusCodeDealNotFound indicates that the requested deal was not found, corresponding to status code 404" +// @Failure 425 {object} mk20.UploadStatusCode "UploadStatusCodeUploadNotStarted indicates that the upload process has not started yet" +// @Failure 500 {object} mk20.UploadStatusCode "UploadStatusCodeServerError indicates an internal server error occurred during the upload process, corresponding to status code 500" +// @Failure 400 {string} string "Bad Request - Invalid input or validation error" +// @security CurioAuth +func (mdh *MK20DealHandler) mk20UploadStatus(w http.ResponseWriter, r *http.Request) { + idStr := chi.URLParam(r, "id") + if idStr == "" { + log.Errorw("missing id in url", "url", r.URL) + http.Error(w, "missing id in url", http.StatusBadRequest) + return + } + id, err := ulid.Parse(idStr) + if err != nil { + log.Errorw("invalid id in url", "id", idStr, "err", err) + http.Error(w, "invalid id in url", http.StatusBadRequest) + return + } + mdh.dm.MK20Handler.HandleUploadStatus(r.Context(), id, w) +} + +// mk20UploadDealChunks handles uploading of deal file chunks. +// @Router /uploads/{id}/{chunkNum} [put] +// @Summary Upload a file chunk +// @Description Allows uploading chunks for a deal file. Method can be called in parallel to speed up uploads. +// @BasePath /market/mk20 +// @Param id path string true "id" +// @Param chunkNum path string true "chunkNum" +// @accepts bytes +// @Param data body []byte true "raw binary" +// @Failure 200 {object} mk20.UploadCode "UploadOk indicates a successful upload operation, represented by the HTTP status code 200" +// @Failure 400 {object} mk20.UploadCode "UploadBadRequest represents a bad request error with an HTTP status code of 400" +// @Failure 404 {object} mk20.UploadCode "UploadNotFound represents an error where the requested upload chunk could not be found, typically corresponding to HTTP status 404" +// @Failure 409 {object} mk20.UploadCode "UploadChunkAlreadyUploaded indicates that the chunk has already been uploaded and cannot be re-uploaded" +// @Failure 500 {object} mk20.UploadCode "UploadServerError indicates a server-side error occurred during the upload process, represented by the HTTP status code 500" +// @Failure 400 {string} string "Bad Request - Invalid input or validation error" +// @security CurioAuth +func (mdh *MK20DealHandler) mk20UploadDealChunks(w http.ResponseWriter, r *http.Request) { + ct := r.Header.Get("Content-Type") + if ct != "application/octet-stream" { + log.Errorw("invalid content type", "ct", ct) + http.Error(w, "invalid content type", http.StatusBadRequest) + return + } + + idStr := chi.URLParam(r, "id") + if idStr == "" { + log.Errorw("missing id in url", "url", r.URL) + http.Error(w, "missing id in url", http.StatusBadRequest) + return + } + id, err := ulid.Parse(idStr) + if err != nil { + log.Errorw("invalid id in url", "id", idStr, "err", err) + http.Error(w, "invalid id in url", http.StatusBadRequest) + return + } + + chunk := chi.URLParam(r, "chunkNum") + if chunk == "" { + log.Errorw("missing chunk number in url", "url", r.URL) + http.Error(w, "missing chunk number in url", http.StatusBadRequest) + return + } + + chunkNum, err := strconv.Atoi(chunk) + if err != nil { + log.Errorw("invalid chunk number in url", "url", r.URL) + http.Error(w, "invalid chunk number in url", http.StatusBadRequest) + return + } + + mdh.dm.MK20Handler.HandleUploadChunk(id, chunkNum, r.Body, w) +} + +// mk20UploadStart handles the initiation of an upload process for MK20 deal data. +// @Router /uploads/{id} [post] +// @Summary Starts the upload process +// @Description Initializes the upload for a deal. Each upload must be initialized before chunks can be uploaded for a deal. +// @BasePath /market/mk20 +// @Accept json +// @Param id path string true "id" +// @Param data body mk20.StartUpload true "Metadata for initiating an upload operation" +// @Failure 200 {object} mk20.UploadStartCode "UploadStartCodeOk indicates a successful upload start request with status code 200" +// @Failure 400 {object} mk20.UploadStartCode "UploadStartCodeBadRequest indicates a bad upload start request error with status code 400" +// @Failure 404 {object} mk20.UploadStartCode "UploadStartCodeDealNotFound represents a 404 status indicating the deal was not found during the upload start process" +// @Failure 409 {object} mk20.UploadStartCode "UploadStartCodeAlreadyStarted indicates that the upload process has already been initiated and cannot be started again" +// @Failure 500 {object} mk20.UploadStartCode "UploadStartCodeServerError indicates an error occurred on the server while processing an upload start request" +// @Failure 400 {string} string "Bad Request - Invalid input or validation error" +// @security CurioAuth +func (mdh *MK20DealHandler) mk20UploadStart(w http.ResponseWriter, r *http.Request) { + ct := r.Header.Get("Content-Type") + if ct != "application/json" { + log.Errorw("invalid content type", "ct", ct) + http.Error(w, "invalid content type", http.StatusBadRequest) + return + } + + idStr := chi.URLParam(r, "id") + if idStr == "" { + log.Errorw("missing id in url", "url", r.URL) + http.Error(w, "missing id in url", http.StatusBadRequest) + return + } + + id, err := ulid.Parse(idStr) + if err != nil { + log.Errorw("invalid id in url", "id", idStr, "err", err) + http.Error(w, "invalid id in url", http.StatusBadRequest) + return + } + + reader := io.LimitReader(r.Body, 4*1024*1024) + b, err := io.ReadAll(reader) + if err != nil { + log.Errorw("failed to read request body", "err", err) + http.Error(w, "failed to read request body", http.StatusBadRequest) + return + } + + upload := mk20.StartUpload{} + err = json.Unmarshal(b, &upload) + if err != nil { + log.Errorw("failed to unmarshal request body", "err", err) + http.Error(w, "failed to unmarshal request body", http.StatusBadRequest) + return + } + + mdh.dm.MK20Handler.HandleUploadStart(r.Context(), id, upload, w) + +} + +// mk20FinalizeUpload finalizes the upload process for a given deal by processing the request and updating the associated deal in the system if required. +// @Router /uploads/finalize/{id} [post] +// @Summary Finalizes the upload process +// @Description Finalizes the upload process once all the chunks are uploaded. +// @BasePath /market/mk20 +// @Param id path string true "id" +// @accepts json +// @Param body body mk20.Deal optional "mk20.deal in json format" +// @Accept json +// @Failure 200 {object} mk20.DealCode "Ok represents a successful operation with an HTTP status code of 200" +// @Failure 400 {object} mk20.DealCode "ErrBadProposal represents a validation error that indicates an invalid or malformed proposal input in the context of validation logic" +// @Failure 404 {object} mk20.DealCode "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404" +// @Failure 430 {object} mk20.DealCode "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data" +// @Failure 422 {object} mk20.DealCode "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context" +// @Failure 423 {object} mk20.DealCode "ErrUnsupportedProduct indicates that the requested product is not supported by the provider" +// @Failure 424 {object} mk20.DealCode "ErrProductNotEnabled indicates that the requested product is not enabled on the provider" +// @Failure 425 {object} mk20.DealCode "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data" +// @Failure 426 {object} mk20.DealCode "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules" +// @Failure 500 {object} mk20.DealCode "ErrServerInternalError indicates an internal server error with a corresponding error code of 500" +// @Failure 503 {object} mk20.DealCode "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503" +// @Failure 429 {object} mk20.DealCode "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment" +// @Failure 440 {object} mk20.DealCode "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation" +// @Failure 441 {object} mk20.DealCode "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold" +// @Failure 400 {string} string "Bad Request - Invalid input or validation error" +// @security CurioAuth +func (mdh *MK20DealHandler) mk20FinalizeUpload(w http.ResponseWriter, r *http.Request) { + idStr := chi.URLParam(r, "id") + if idStr == "" { + log.Errorw("missing id in url", "url", r.URL) + http.Error(w, "missing id in url", http.StatusBadRequest) + return + } + + authHeader := r.Header.Get("Authorization") + if authHeader == "" { + http.Error(w, "Missing Authorization header", http.StatusUnauthorized) + return + } + + id, err := ulid.Parse(idStr) + if err != nil { + log.Errorw("invalid id in url", "id", idStr, "err", err) + http.Error(w, "invalid id in url", http.StatusBadRequest) + return + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + http.Error(w, "error reading request body", http.StatusBadRequest) + return + } + defer func() { + _ = r.Body.Close() + }() + + log.Debugw("received upload finalize proposal", "id", idStr, "body", string(body)) + + if len(bytes.TrimSpace(body)) == 0 { + log.Debugw("no deal provided, using empty deal to finalize upload", "id", idStr) + mdh.dm.MK20Handler.HandleUploadFinalize(id, nil, w, authHeader) + return + } + + ct := r.Header.Get("Content-Type") + if len(ct) == 0 { + http.Error(w, "missing content type", http.StatusBadRequest) + return + } + + if ct != "application/json" { + log.Errorf("invalid content type: %s", ct) + http.Error(w, "invalid content type", http.StatusBadRequest) + return + } + + var deal mk20.Deal + + err = json.Unmarshal(body, &deal) + if err != nil { + log.Errorf("error unmarshaling json: %s", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + mdh.dm.MK20Handler.HandleUploadFinalize(id, &deal, w, authHeader) +} + +// mk20UpdateDeal handles updating an MK20 deal based on the provided HTTP request. +// It validates the deal ID, request content type, and JSON body before updating. +// @Summary Update the deal details of existing deals. +// @Description Useful for adding adding additional products and updating PoRep duration +// @BasePath /market/mk20 +// @Router /update/{id} [get] +// @Param id path string true "id" +// @Accept json +// @Param body body mk20.Deal true "mk20.Deal in json format" +// @Failure 200 {object} mk20.DealCode "Ok represents a successful operation with an HTTP status code of 200" +// @Failure 400 {object} mk20.DealCode "ErrBadProposal represents a validation error that indicates an invalid or malformed proposal input in the context of validation logic" +// @Failure 404 {object} mk20.DealCode "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404" +// @Failure 430 {object} mk20.DealCode "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data" +// @Failure 422 {object} mk20.DealCode "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context" +// @Failure 423 {object} mk20.DealCode "ErrUnsupportedProduct indicates that the requested product is not supported by the provider" +// @Failure 424 {object} mk20.DealCode "ErrProductNotEnabled indicates that the requested product is not enabled on the provider" +// @Failure 425 {object} mk20.DealCode "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data" +// @Failure 426 {object} mk20.DealCode "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules" +// @Failure 500 {object} mk20.DealCode "ErrServerInternalError indicates an internal server error with a corresponding error code of 500" +// @Failure 503 {object} mk20.DealCode "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503" +// @Failure 429 {object} mk20.DealCode "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment" +// @Failure 440 {object} mk20.DealCode "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation" +// @Failure 441 {object} mk20.DealCode "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold" +// @Failure 400 {string} string "Bad Request - Invalid input or validation error" +// @security CurioAuth +func (mdh *MK20DealHandler) mk20UpdateDeal(w http.ResponseWriter, r *http.Request) { + idStr := chi.URLParam(r, "id") + if idStr == "" { + log.Errorw("missing id in url", "url", r.URL) + http.Error(w, "missing id in url", http.StatusBadRequest) + } + + id, err := ulid.Parse(idStr) + if err != nil { + log.Errorw("invalid id in url", "id", idStr, "err", err) + http.Error(w, "invalid id in url", http.StatusBadRequest) + return + } + + ct := r.Header.Get("Content-Type") + var deal mk20.Deal + if ct != "application/json" { + log.Errorf("invalid content type: %s", ct) + http.Error(w, "invalid content type", http.StatusBadRequest) + return + } + + defer func() { + _ = r.Body.Close() + }() + + body, err := io.ReadAll(r.Body) + if err != nil { + log.Errorf("error reading request body: %s", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + err = json.Unmarshal(body, &deal) + if err != nil { + log.Errorf("error unmarshaling json: %s", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + authHeader := r.Header.Get("Authorization") + if authHeader == "" { + http.Error(w, "Missing Authorization header", http.StatusUnauthorized) + return + } + + log.Infow("received deal update proposal", "body", string(body)) + + result := mdh.dm.MK20Handler.UpdateDeal(id, &deal, authHeader) + + log.Infow("deal updated", + "id", deal.Identifier, + "HTTPCode", result.HTTPCode, + "Reason", result.Reason) + + w.WriteHeader(int(result.HTTPCode)) + _, err = fmt.Fprint(w, "Reason: ", result.Reason) + if err != nil { + log.Errorw("writing deal update response:", "id", deal.Identifier, "error", err) + } +} + +// mk20SerialUpload handles uploading of deal data in a single stream +// @Router /upload/{id} [put] +// @Summary Upload the deal data +// @Description Allows uploading data for deals in a single stream. Suitable for small deals. +// @BasePath /market/mk20 +// @Param id path string true "id" +// @accepts bytes +// @Param body body []byte true "raw binary" +// @Failure 200 {object} mk20.UploadCode "UploadOk indicates a successful upload operation, represented by the HTTP status code 200" +// @Failure 500 {object} mk20.UploadCode "UploadServerError indicates a server-side error occurred during the upload process, represented by the HTTP status code 500" +// @Failure 404 {object} mk20.UploadStartCode "UploadStartCodeDealNotFound represents a 404 status indicating the deal was not found during the upload start process" +// @Failure 400 {string} string "Bad Request - Invalid input or validation error" +// @security CurioAuth +func (mdh *MK20DealHandler) mk20SerialUpload(w http.ResponseWriter, r *http.Request) { + idStr := chi.URLParam(r, "id") + if idStr == "" { + log.Errorw("missing id in url", "url", r.URL) + http.Error(w, "missing id in url", http.StatusBadRequest) + return + } + + id, err := ulid.Parse(idStr) + if err != nil { + log.Errorw("invalid id in url", "id", idStr, "err", err) + http.Error(w, "invalid id in url", http.StatusBadRequest) + return + } + + mdh.dm.MK20Handler.HandleSerialUpload(id, r.Body, w) +} + +// mk20SerialUploadFinalize finalizes the serial upload process for a given deal by processing the request and updating the associated deal in the system if required. +// @Router /upload/{id} [post] +// @Summary Finalizes the serial upload process +// @Description Finalizes the serial upload process once data has been uploaded +// @BasePath /market/mk20 +// @Param id path string true "id" +// @accepts json +// @Param body body mk20.Deal optional "mk20.deal in json format" +// @Accept json +// @Failure 200 {object} mk20.DealCode "Ok represents a successful operation with an HTTP status code of 200" +// @Failure 400 {object} mk20.DealCode "ErrBadProposal represents a validation error that indicates an invalid or malformed proposal input in the context of validation logic" +// @Failure 404 {object} mk20.DealCode "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404" +// @Failure 430 {object} mk20.DealCode "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data" +// @Failure 422 {object} mk20.DealCode "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context" +// @Failure 423 {object} mk20.DealCode "ErrUnsupportedProduct indicates that the requested product is not supported by the provider" +// @Failure 424 {object} mk20.DealCode "ErrProductNotEnabled indicates that the requested product is not enabled on the provider" +// @Failure 425 {object} mk20.DealCode "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data" +// @Failure 426 {object} mk20.DealCode "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules" +// @Failure 500 {object} mk20.DealCode "ErrServerInternalError indicates an internal server error with a corresponding error code of 500" +// @Failure 503 {object} mk20.DealCode "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503" +// @Failure 429 {object} mk20.DealCode "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment" +// @Failure 440 {object} mk20.DealCode "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation" +// @Failure 441 {object} mk20.DealCode "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold" +// @Failure 400 {string} string "Bad Request - Invalid input or validation error" +// @security CurioAuth +func (mdh *MK20DealHandler) mk20SerialUploadFinalize(w http.ResponseWriter, r *http.Request) { + idStr := chi.URLParam(r, "id") + if idStr == "" { + log.Errorw("missing id in url", "url", r.URL) + http.Error(w, "missing id in url", http.StatusBadRequest) + return + } + + authHeader := r.Header.Get("Authorization") + if authHeader == "" { + http.Error(w, "Missing Authorization header", http.StatusUnauthorized) + return + } + + id, err := ulid.Parse(idStr) + if err != nil { + log.Errorw("invalid id in url", "id", idStr, "err", err) + http.Error(w, "invalid id in url", http.StatusBadRequest) + return + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + http.Error(w, "error reading request body", http.StatusBadRequest) + return + } + defer func() { + _ = r.Body.Close() + }() + + log.Debugw("received serial upload finalize proposal", "id", idStr, "body", string(body)) + + if len(bytes.TrimSpace(body)) == 0 { + log.Debugw("no deal provided, using empty deal to finalize upload", "id", idStr) + mdh.dm.MK20Handler.HandleSerialUploadFinalize(id, nil, w, authHeader) + return + } + + ct := r.Header.Get("Content-Type") + + var deal mk20.Deal + if ct != "application/json" { + log.Errorf("invalid content type: %s", ct) + http.Error(w, "invalid content type", http.StatusBadRequest) + return + } + + err = json.Unmarshal(body, &deal) + if err != nil { + log.Errorf("error unmarshaling json: %s", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + mdh.dm.MK20Handler.HandleSerialUploadFinalize(id, &deal, w, authHeader) +} diff --git a/market/mk20/http/swagger.json b/market/mk20/http/swagger.json new file mode 100644 index 000000000..97dcba427 --- /dev/null +++ b/market/mk20/http/swagger.json @@ -0,0 +1,1448 @@ +{ + "swagger": "2.0", + "info": { + "description": "Curio market APIs", + "title": "Curio Market 2.0 API", + "contact": {} + }, + "paths": { + "/contracts": { + "get": { + "description": "List of supported DDO contracts", + "summary": "List of supported DDO contracts", + "responses": { + "200": { + "description": "Array of contract addresses supported by a system or application.", + "schema": { + "$ref": "#/definitions/mk20.SupportedContracts" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, + "/info/": { + "get": { + "description": "- OpenAPI spec UI for the Market 2.0 APIs", + "summary": "OpenAPI Spec UI", + "responses": {} + } + }, + "/info/swagger.json": { + "get": { + "description": "- OpenAPI spec for the Market 2.0 APIs in JSON format", + "summary": "OpenAPI Spec JSON", + "responses": {} + } + }, + "/info/swagger.yaml": { + "get": { + "description": "- OpenAPI spec for the Market 2.0 APIs in YAML format", + "summary": "OpenAPI Spec YAML", + "responses": {} + } + }, + "/products": { + "get": { + "description": "List of supported products", + "summary": "List of supported products", + "responses": { + "200": { + "description": "Array of products supported by the SP", + "schema": { + "$ref": "#/definitions/mk20.SupportedProducts" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, + "/sources": { + "get": { + "description": "List of supported data sources", + "summary": "List of supported data sources", + "responses": { + "200": { + "description": "Array of dats sources supported by the SP", + "schema": { + "$ref": "#/definitions/mk20.SupportedDataSources" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, + "/status/{id}": { + "get": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Current status of MK20 deal per product", + "summary": "Status of the MK20 deal", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "the status response for deal products with their respective deal statuses", + "schema": { + "$ref": "#/definitions/mk20.DealProductStatusResponse" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, + "/store": { + "post": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Make a mk20 deal", + "consumes": [ + "application/json" + ], + "summary": "Make a mk20 deal", + "parameters": [ + { + "description": "mk20.Deal in json format", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/mk20.Deal" + } + } + ], + "responses": { + "200": { + "description": "Ok represents a successful operation with an HTTP status code of 200", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "422": { + "description": "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "423": { + "description": "ErrUnsupportedProduct indicates that the requested product is not supported by the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "424": { + "description": "ErrProductNotEnabled indicates that the requested product is not enabled on the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "425": { + "description": "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "426": { + "description": "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "429": { + "description": "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "430": { + "description": "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "440": { + "description": "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "441": { + "description": "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "500": { + "description": "ErrServerInternalError indicates an internal server error with a corresponding error code of 500", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "503": { + "description": "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + } + } + } + }, + "/update/{id}": { + "get": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Useful for adding adding additional products and updating PoRep duration", + "consumes": [ + "application/json" + ], + "summary": "Update the deal details of existing deals.", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "mk20.Deal in json format", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/mk20.Deal" + } + } + ], + "responses": { + "200": { + "description": "Ok represents a successful operation with an HTTP status code of 200", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "422": { + "description": "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "423": { + "description": "ErrUnsupportedProduct indicates that the requested product is not supported by the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "424": { + "description": "ErrProductNotEnabled indicates that the requested product is not enabled on the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "425": { + "description": "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "426": { + "description": "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "429": { + "description": "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "430": { + "description": "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "440": { + "description": "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "441": { + "description": "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "500": { + "description": "ErrServerInternalError indicates an internal server error with a corresponding error code of 500", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "503": { + "description": "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + } + } + } + }, + "/upload/{id}": { + "put": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Allows uploading data for deals in a single stream. Suitable for small deals.", + "summary": "Upload the deal data", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "raw binary", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "integer" + } + } + } + ], + "responses": { + "200": { + "description": "UploadOk indicates a successful upload operation, represented by the HTTP status code 200", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "UploadStartCodeDealNotFound represents a 404 status indicating the deal was not found during the upload start process", + "schema": { + "$ref": "#/definitions/mk20.UploadStartCode" + } + }, + "500": { + "description": "UploadServerError indicates a server-side error occurred during the upload process, represented by the HTTP status code 500", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + } + } + }, + "post": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Finalizes the serial upload process once data has been uploaded", + "consumes": [ + "application/json" + ], + "summary": "Finalizes the serial upload process", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "mk20.deal in json format", + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/mk20.Deal" + } + } + ], + "responses": { + "200": { + "description": "Ok represents a successful operation with an HTTP status code of 200", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "422": { + "description": "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "423": { + "description": "ErrUnsupportedProduct indicates that the requested product is not supported by the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "424": { + "description": "ErrProductNotEnabled indicates that the requested product is not enabled on the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "425": { + "description": "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "426": { + "description": "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "429": { + "description": "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "430": { + "description": "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "440": { + "description": "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "441": { + "description": "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "500": { + "description": "ErrServerInternalError indicates an internal server error with a corresponding error code of 500", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "503": { + "description": "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + } + } + } + }, + "/uploads/finalize/{id}": { + "post": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Finalizes the upload process once all the chunks are uploaded.", + "consumes": [ + "application/json" + ], + "summary": "Finalizes the upload process", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "mk20.deal in json format", + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/mk20.Deal" + } + } + ], + "responses": { + "200": { + "description": "Ok represents a successful operation with an HTTP status code of 200", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "422": { + "description": "ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "423": { + "description": "ErrUnsupportedProduct indicates that the requested product is not supported by the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "424": { + "description": "ErrProductNotEnabled indicates that the requested product is not enabled on the provider", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "425": { + "description": "ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "426": { + "description": "ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "429": { + "description": "ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "430": { + "description": "ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "440": { + "description": "ErrMarketNotEnabled indicates that the market is not enabled for the requested operation", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "441": { + "description": "ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "500": { + "description": "ErrServerInternalError indicates an internal server error with a corresponding error code of 500", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + }, + "503": { + "description": "ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503", + "schema": { + "$ref": "#/definitions/mk20.DealCode" + } + } + } + } + }, + "/uploads/{id}": { + "get": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Return a json struct detailing the current status of a deal upload.", + "summary": "Status of deal upload", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "The status of a file upload process, including progress and missing chunks", + "schema": { + "$ref": "#/definitions/mk20.UploadStatus" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "UploadStatusCodeDealNotFound indicates that the requested deal was not found, corresponding to status code 404", + "schema": { + "$ref": "#/definitions/mk20.UploadStatusCode" + } + }, + "425": { + "description": "UploadStatusCodeUploadNotStarted indicates that the upload process has not started yet", + "schema": { + "$ref": "#/definitions/mk20.UploadStatusCode" + } + }, + "500": { + "description": "UploadStatusCodeServerError indicates an internal server error occurred during the upload process, corresponding to status code 500", + "schema": { + "$ref": "#/definitions/mk20.UploadStatusCode" + } + } + } + }, + "post": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Initializes the upload for a deal. Each upload must be initialized before chunks can be uploaded for a deal.", + "consumes": [ + "application/json" + ], + "summary": "Starts the upload process", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Metadata for initiating an upload operation", + "name": "data", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/mk20.StartUpload" + } + } + ], + "responses": { + "200": { + "description": "UploadStartCodeOk indicates a successful upload start request with status code 200", + "schema": { + "$ref": "#/definitions/mk20.UploadStartCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "UploadStartCodeDealNotFound represents a 404 status indicating the deal was not found during the upload start process", + "schema": { + "$ref": "#/definitions/mk20.UploadStartCode" + } + }, + "409": { + "description": "UploadStartCodeAlreadyStarted indicates that the upload process has already been initiated and cannot be started again", + "schema": { + "$ref": "#/definitions/mk20.UploadStartCode" + } + }, + "500": { + "description": "UploadStartCodeServerError indicates an error occurred on the server while processing an upload start request", + "schema": { + "$ref": "#/definitions/mk20.UploadStartCode" + } + } + } + } + }, + "/uploads/{id}/{chunkNum}": { + "put": { + "security": [ + { + "CurioAuth": [] + } + ], + "description": "Allows uploading chunks for a deal file. Method can be called in parallel to speed up uploads.", + "summary": "Upload a file chunk", + "parameters": [ + { + "type": "string", + "description": "id", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "chunkNum", + "name": "chunkNum", + "in": "path", + "required": true + }, + { + "description": "raw binary", + "name": "data", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "integer" + } + } + } + ], + "responses": { + "200": { + "description": "UploadOk indicates a successful upload operation, represented by the HTTP status code 200", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + }, + "400": { + "description": "Bad Request - Invalid input or validation error", + "schema": { + "type": "string" + } + }, + "404": { + "description": "UploadNotFound represents an error where the requested upload chunk could not be found, typically corresponding to HTTP status 404", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + }, + "409": { + "description": "UploadChunkAlreadyUploaded indicates that the chunk has already been uploaded and cannot be re-uploaded", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + }, + "500": { + "description": "UploadServerError indicates a server-side error occurred during the upload process, represented by the HTTP status code 500", + "schema": { + "$ref": "#/definitions/mk20.UploadCode" + } + } + } + } + } + }, + "definitions": { + "address.Address": { + "type": "object" + }, + "github_com_filecoin-project_go-state-types_builtin_v16_verifreg.AllocationId": { + "type": "integer", + "format": "int64", + "enum": [ + 0 + ], + "x-enum-varnames": [ + "NoAllocationID" + ] + }, + "http.Header": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "mk20.AggregateType": { + "type": "integer", + "enum": [ + 0, + 1 + ], + "x-enum-varnames": [ + "AggregateTypeNone", + "AggregateTypeV1" + ] + }, + "mk20.DDOV1": { + "type": "object", + "properties": { + "allocation_id": { + "description": "AllocationId represents an allocation identifier for the deal.", + "allOf": [ + { + "$ref": "#/definitions/github_com_filecoin-project_go-state-types_builtin_v16_verifreg.AllocationId" + } + ] + }, + "contract_address": { + "description": "ContractAddress specifies the address of the contract governing the deal", + "type": "string" + }, + "contract_verify_method": { + "description": "ContractDealIDMethod specifies the method name to verify the deal and retrieve the deal ID for a contract", + "type": "string" + }, + "contract_verify_method_Params": { + "description": "ContractDealIDMethodParams represents encoded parameters for the contract verify method if required by the contract", + "type": "array", + "items": { + "type": "integer" + } + }, + "duration": { + "description": "Duration represents the deal duration in epochs. This value is ignored for the deal with allocationID.\nIt must be at least 518400", + "type": "integer" + }, + "notification_address": { + "description": "NotificationAddress specifies the address to which notifications will be relayed to when sector is activated", + "type": "string" + }, + "notification_payload": { + "description": "NotificationPayload holds the notification data typically in a serialized byte array format.", + "type": "array", + "items": { + "type": "integer" + } + }, + "piece_manager": { + "description": "Actor providing AuthorizeMessage (like f1/f3 wallet) able to authorize actions such as managing ACLs", + "allOf": [ + { + "$ref": "#/definitions/address.Address" + } + ] + }, + "provider": { + "description": "Provider specifies the address of the provider", + "allOf": [ + { + "$ref": "#/definitions/address.Address" + } + ] + } + } + }, + "mk20.DataSource": { + "type": "object", + "properties": { + "format": { + "description": "Format defines the format of the piece data, which can include CAR, Aggregate, or Raw formats.", + "allOf": [ + { + "$ref": "#/definitions/mk20.PieceDataFormat" + } + ] + }, + "piece_cid": { + "description": "PieceCID represents the unique identifier (pieceCID V2) for a piece of data, stored as a CID object.", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "example": { + "/": "bafkzcibfxx3meais3xzh6qn56y6hiasmrufhegoweu3o5ccofs74nfdfr4yn76pqz4pq" + } + }, + "source_aggregate": { + "description": "SourceAggregate represents an aggregated source, comprising multiple data sources as pieces.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DataSourceAggregate" + } + ] + }, + "source_http": { + "description": "SourceHTTP represents the HTTP-based source of piece data within a deal, including raw size and URLs for retrieval.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DataSourceHTTP" + } + ] + }, + "source_http_put": { + "description": "SourceHttpPut allow clients to push piece data after deal is accepted", + "allOf": [ + { + "$ref": "#/definitions/mk20.DataSourceHttpPut" + } + ] + }, + "source_offline": { + "description": "SourceOffline defines the data source for offline pieces, including raw size information.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DataSourceOffline" + } + ] + } + } + }, + "mk20.DataSourceAggregate": { + "type": "object", + "properties": { + "pieces": { + "type": "array", + "items": { + "$ref": "#/definitions/mk20.DataSource" + } + } + } + }, + "mk20.DataSourceHTTP": { + "type": "object", + "properties": { + "urls": { + "description": "URLs lists the HTTP endpoints where the piece data can be fetched.", + "type": "array", + "items": { + "$ref": "#/definitions/mk20.HttpUrl" + } + } + } + }, + "mk20.DataSourceHttpPut": { + "type": "object" + }, + "mk20.DataSourceOffline": { + "type": "object" + }, + "mk20.Deal": { + "type": "object", + "properties": { + "client": { + "description": "Client wallet string for the deal", + "type": "string" + }, + "data": { + "description": "Data represents the source of piece data and associated metadata.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DataSource" + } + ] + }, + "identifier": { + "description": "Identifier represents a unique identifier for the deal in ULID format.", + "type": "string", + "format": "ulid", + "example": "01ARZ3NDEKTSV4RRFFQ69G5FAV" + }, + "products": { + "description": "Products represents a collection of product-specific information associated with a deal", + "allOf": [ + { + "$ref": "#/definitions/mk20.Products" + } + ] + } + } + }, + "mk20.DealCode": { + "type": "integer", + "enum": [ + 200, + 401, + 400, + 404, + 430, + 422, + 423, + 424, + 425, + 426, + 500, + 503, + 429, + 440, + 441 + ], + "x-enum-varnames": [ + "Ok", + "ErrUnAuthorized", + "ErrBadProposal", + "ErrDealNotFound", + "ErrMalformedDataSource", + "ErrUnsupportedDataSource", + "ErrUnsupportedProduct", + "ErrProductNotEnabled", + "ErrProductValidationFailed", + "ErrDealRejectedByMarket", + "ErrServerInternalError", + "ErrServiceMaintenance", + "ErrServiceOverloaded", + "ErrMarketNotEnabled", + "ErrDurationTooShort" + ] + }, + "mk20.DealProductStatusResponse": { + "type": "object", + "properties": { + "ddo_v1": { + "description": "DDOV1 holds the DealStatusResponse for product \"ddo_v1\".", + "allOf": [ + { + "$ref": "#/definitions/mk20.DealStatusResponse" + } + ] + }, + "pdp_v1": { + "description": "PDPV1 represents the DealStatusResponse for the product pdp_v1.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DealStatusResponse" + } + ] + } + } + }, + "mk20.DealState": { + "type": "string", + "enum": [ + "accepted", + "uploading", + "processing", + "sealing", + "indexing", + "failed", + "complete" + ], + "x-enum-varnames": [ + "DealStateAccepted", + "DealStateAwaitingUpload", + "DealStateProcessing", + "DealStateSealing", + "DealStateIndexing", + "DealStateFailed", + "DealStateComplete" + ] + }, + "mk20.DealStatusResponse": { + "type": "object", + "properties": { + "errorMsg": { + "description": "ErrorMsg is an optional field containing error details associated with the deal's current state if an error occurred.", + "type": "string" + }, + "status": { + "description": "State indicates the current processing state of the deal as a DealState value.", + "allOf": [ + { + "$ref": "#/definitions/mk20.DealState" + } + ] + } + } + }, + "mk20.FormatAggregate": { + "type": "object", + "properties": { + "sub": { + "description": "Sub holds a slice of DataSource, representing details of sub pieces aggregated under this format.\nThe order must be same as segment index to avoid incorrect indexing of sub pieces in an aggregate", + "type": "array", + "items": { + "$ref": "#/definitions/mk20.DataSource" + } + }, + "type": { + "description": "Type specifies the type of aggregation for data pieces, represented by an AggregateType value.", + "allOf": [ + { + "$ref": "#/definitions/mk20.AggregateType" + } + ] + } + } + }, + "mk20.FormatBytes": { + "type": "object" + }, + "mk20.FormatCar": { + "type": "object" + }, + "mk20.HttpUrl": { + "type": "object", + "properties": { + "fallback": { + "description": "Fallback indicates whether this URL serves as a fallback option when other URLs fail.", + "type": "boolean" + }, + "headers": { + "description": "HTTPHeaders represents the HTTP headers associated with the URL.", + "allOf": [ + { + "$ref": "#/definitions/http.Header" + } + ] + }, + "priority": { + "description": "Priority indicates the order preference for using the URL in requests, with lower values having higher priority.", + "type": "integer" + }, + "url": { + "description": "URL specifies the HTTP endpoint where the piece data can be fetched.", + "type": "string" + } + } + }, + "mk20.PDPV1": { + "type": "object", + "properties": { + "add_piece": { + "description": "AddPiece indicated that this deal is meant to add Piece to a given DataSet. DataSetID must be defined.", + "type": "boolean" + }, + "create_data_set": { + "description": "CreateDataSet indicated that this deal is meant to create a new DataSet for the client by storage provider.", + "type": "boolean" + }, + "data_set_id": { + "description": "DataSetID is PDP verified contract dataset ID. It must be defined for all deals except when CreateDataSet is true.", + "type": "integer" + }, + "delete_data_set": { + "description": "DeleteDataSet indicated that this deal is meant to delete an existing DataSet created by SP for the client.\nDataSetID must be defined.", + "type": "boolean" + }, + "delete_piece": { + "description": "DeletePiece indicates whether the Piece of the data should be deleted. DataSetID must be defined.", + "type": "boolean" + }, + "extra_data": { + "description": "ExtraData can be used to send additional information to service contract when Verifier action like AddRoot, DeleteRoot etc. are performed.", + "type": "array", + "items": { + "type": "integer" + } + }, + "piece_ids": { + "description": "PieceIDs is a list of Piece ids in a proof set.", + "type": "array", + "items": { + "type": "integer" + } + }, + "record_keeper": { + "description": "RecordKeeper specifies the record keeper contract address for the new PDP dataset.", + "type": "string" + } + } + }, + "mk20.PieceDataFormat": { + "type": "object", + "properties": { + "aggregate": { + "description": "Aggregate holds a reference to the aggregated format of piece data.", + "allOf": [ + { + "$ref": "#/definitions/mk20.FormatAggregate" + } + ] + }, + "car": { + "description": "Car represents the optional CAR file format.", + "allOf": [ + { + "$ref": "#/definitions/mk20.FormatCar" + } + ] + }, + "raw": { + "description": "Raw represents the raw format of the piece data, encapsulated as bytes.", + "allOf": [ + { + "$ref": "#/definitions/mk20.FormatBytes" + } + ] + } + } + }, + "mk20.Products": { + "type": "object", + "properties": { + "ddo_v1": { + "description": "DDOV1 represents a product v1 configuration for Direct Data Onboarding (DDO)", + "allOf": [ + { + "$ref": "#/definitions/mk20.DDOV1" + } + ] + }, + "pdp_v1": { + "description": "PDPV1 represents product-specific configuration for PDP version 1 deals.", + "allOf": [ + { + "$ref": "#/definitions/mk20.PDPV1" + } + ] + }, + "retrieval_v1": { + "description": "RetrievalV1 represents configuration for retrieval settings in the system, including indexing and announcement flags.", + "allOf": [ + { + "$ref": "#/definitions/mk20.RetrievalV1" + } + ] + } + } + }, + "mk20.RetrievalV1": { + "type": "object", + "properties": { + "announce_payload": { + "description": "AnnouncePayload indicates whether the payload should be announced to IPNI.", + "type": "boolean" + }, + "announce_piece": { + "description": "AnnouncePiece indicates whether the piece information should be announced to IPNI.", + "type": "boolean" + }, + "indexing": { + "description": "Indexing indicates if the deal is to be indexed in the provider's system to support CIDs based retrieval", + "type": "boolean" + } + } + }, + "mk20.StartUpload": { + "type": "object", + "properties": { + "chunk_size": { + "description": "ChunkSize defines the size of each data chunk to be used during the upload process.", + "type": "integer" + }, + "raw_size": { + "description": "RawSize indicates the total size of the data to be uploaded in bytes.", + "type": "integer" + } + } + }, + "mk20.SupportedContracts": { + "type": "object", + "properties": { + "contracts": { + "description": "Contracts represents a list of supported contract addresses in string format.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "mk20.SupportedDataSources": { + "type": "object", + "properties": { + "sources": { + "description": "Contracts represents a list of supported contract addresses in string format.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "mk20.SupportedProducts": { + "type": "object", + "properties": { + "products": { + "description": "Contracts represents a list of supported contract addresses in string format.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "mk20.UploadCode": { + "type": "integer", + "enum": [ + 200, + 400, + 404, + 409, + 500, + 429 + ], + "x-enum-varnames": [ + "UploadOk", + "UploadBadRequest", + "UploadNotFound", + "UploadChunkAlreadyUploaded", + "UploadServerError", + "UploadRateLimit" + ] + }, + "mk20.UploadStartCode": { + "type": "integer", + "enum": [ + 200, + 400, + 404, + 409, + 500 + ], + "x-enum-varnames": [ + "UploadStartCodeOk", + "UploadStartCodeBadRequest", + "UploadStartCodeDealNotFound", + "UploadStartCodeAlreadyStarted", + "UploadStartCodeServerError" + ] + }, + "mk20.UploadStatus": { + "type": "object", + "properties": { + "missing": { + "description": "Missing represents the number of chunks that are not yet uploaded.", + "type": "integer" + }, + "missing_chunks": { + "description": "MissingChunks is a slice containing the indices of missing chunks.", + "type": "array", + "items": { + "type": "integer" + } + }, + "total_chunks": { + "description": "TotalChunks represents the total number of chunks required for the upload.", + "type": "integer" + }, + "uploaded": { + "description": "Uploaded represents the number of chunks successfully uploaded.", + "type": "integer" + }, + "uploaded_chunks": { + "description": "UploadedChunks is a slice containing the indices of successfully uploaded chunks.", + "type": "array", + "items": { + "type": "integer" + } + } + } + }, + "mk20.UploadStatusCode": { + "type": "integer", + "enum": [ + 200, + 404, + 425, + 500 + ], + "x-enum-varnames": [ + "UploadStatusCodeOk", + "UploadStatusCodeDealNotFound", + "UploadStatusCodeUploadNotStarted", + "UploadStatusCodeServerError" + ] + } + }, + "securityDefinitions": { + "CurioAuth": { + "description": "Use the format: `CurioAuth PublicKeyType:PublicKey:Signature`\n\n- `PublicKeyType`: String representation of type of wallet (e.g., \"ed25519\", \"bls\", \"secp256k1\")\n- `PublicKey`: Base64 string of public key bytes\n- `Signature`: Signature is Base64 string of signature bytes.\n- The client is expected to sign the SHA-256 hash of a message constructed by concatenating the following components, in order.\n- The raw public key bytes (not a human-readable address)\n- The timestamp, truncated to the nearest hour, formatted in RFC3339 (e.g., 2025-07-15T17:00:00Z)\n- These two byte slices are joined without any delimiter between them, and the resulting byte array is then hashed using SHA-256. The signature is performed on that hash.", + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + } +} \ No newline at end of file diff --git a/market/mk20/http/swagger.yaml b/market/mk20/http/swagger.yaml new file mode 100644 index 000000000..3e01d806f --- /dev/null +++ b/market/mk20/http/swagger.yaml @@ -0,0 +1,1078 @@ +definitions: + address.Address: + type: object + github_com_filecoin-project_go-state-types_builtin_v16_verifreg.AllocationId: + enum: + - 0 + format: int64 + type: integer + x-enum-varnames: + - NoAllocationID + http.Header: + additionalProperties: + items: + type: string + type: array + type: object + mk20.AggregateType: + enum: + - 0 + - 1 + type: integer + x-enum-varnames: + - AggregateTypeNone + - AggregateTypeV1 + mk20.DDOV1: + properties: + allocation_id: + allOf: + - $ref: '#/definitions/github_com_filecoin-project_go-state-types_builtin_v16_verifreg.AllocationId' + description: AllocationId represents an allocation identifier for the deal. + contract_address: + description: ContractAddress specifies the address of the contract governing + the deal + type: string + contract_verify_method: + description: ContractDealIDMethod specifies the method name to verify the + deal and retrieve the deal ID for a contract + type: string + contract_verify_method_Params: + description: ContractDealIDMethodParams represents encoded parameters for + the contract verify method if required by the contract + items: + type: integer + type: array + duration: + description: |- + Duration represents the deal duration in epochs. This value is ignored for the deal with allocationID. + It must be at least 518400 + type: integer + notification_address: + description: NotificationAddress specifies the address to which notifications + will be relayed to when sector is activated + type: string + notification_payload: + description: NotificationPayload holds the notification data typically in + a serialized byte array format. + items: + type: integer + type: array + piece_manager: + allOf: + - $ref: '#/definitions/address.Address' + description: Actor providing AuthorizeMessage (like f1/f3 wallet) able to + authorize actions such as managing ACLs + provider: + allOf: + - $ref: '#/definitions/address.Address' + description: Provider specifies the address of the provider + type: object + mk20.DataSource: + properties: + format: + allOf: + - $ref: '#/definitions/mk20.PieceDataFormat' + description: Format defines the format of the piece data, which can include + CAR, Aggregate, or Raw formats. + piece_cid: + additionalProperties: + type: string + description: PieceCID represents the unique identifier (pieceCID V2) for a + piece of data, stored as a CID object. + example: + /: bafkzcibfxx3meais3xzh6qn56y6hiasmrufhegoweu3o5ccofs74nfdfr4yn76pqz4pq + type: object + source_aggregate: + allOf: + - $ref: '#/definitions/mk20.DataSourceAggregate' + description: SourceAggregate represents an aggregated source, comprising multiple + data sources as pieces. + source_http: + allOf: + - $ref: '#/definitions/mk20.DataSourceHTTP' + description: SourceHTTP represents the HTTP-based source of piece data within + a deal, including raw size and URLs for retrieval. + source_http_put: + allOf: + - $ref: '#/definitions/mk20.DataSourceHttpPut' + description: SourceHttpPut allow clients to push piece data after deal is + accepted + source_offline: + allOf: + - $ref: '#/definitions/mk20.DataSourceOffline' + description: SourceOffline defines the data source for offline pieces, including + raw size information. + type: object + mk20.DataSourceAggregate: + properties: + pieces: + items: + $ref: '#/definitions/mk20.DataSource' + type: array + type: object + mk20.DataSourceHTTP: + properties: + urls: + description: URLs lists the HTTP endpoints where the piece data can be fetched. + items: + $ref: '#/definitions/mk20.HttpUrl' + type: array + type: object + mk20.DataSourceHttpPut: + type: object + mk20.DataSourceOffline: + type: object + mk20.Deal: + properties: + client: + description: Client wallet string for the deal + type: string + data: + allOf: + - $ref: '#/definitions/mk20.DataSource' + description: Data represents the source of piece data and associated metadata. + identifier: + description: Identifier represents a unique identifier for the deal in ULID + format. + example: 01ARZ3NDEKTSV4RRFFQ69G5FAV + format: ulid + type: string + products: + allOf: + - $ref: '#/definitions/mk20.Products' + description: Products represents a collection of product-specific information + associated with a deal + type: object + mk20.DealCode: + enum: + - 200 + - 401 + - 400 + - 404 + - 430 + - 422 + - 423 + - 424 + - 425 + - 426 + - 500 + - 503 + - 429 + - 440 + - 441 + type: integer + x-enum-varnames: + - Ok + - ErrUnAuthorized + - ErrBadProposal + - ErrDealNotFound + - ErrMalformedDataSource + - ErrUnsupportedDataSource + - ErrUnsupportedProduct + - ErrProductNotEnabled + - ErrProductValidationFailed + - ErrDealRejectedByMarket + - ErrServerInternalError + - ErrServiceMaintenance + - ErrServiceOverloaded + - ErrMarketNotEnabled + - ErrDurationTooShort + mk20.DealProductStatusResponse: + properties: + ddo_v1: + allOf: + - $ref: '#/definitions/mk20.DealStatusResponse' + description: DDOV1 holds the DealStatusResponse for product "ddo_v1". + pdp_v1: + allOf: + - $ref: '#/definitions/mk20.DealStatusResponse' + description: PDPV1 represents the DealStatusResponse for the product pdp_v1. + type: object + mk20.DealState: + enum: + - accepted + - uploading + - processing + - sealing + - indexing + - failed + - complete + type: string + x-enum-varnames: + - DealStateAccepted + - DealStateAwaitingUpload + - DealStateProcessing + - DealStateSealing + - DealStateIndexing + - DealStateFailed + - DealStateComplete + mk20.DealStatusResponse: + properties: + errorMsg: + description: ErrorMsg is an optional field containing error details associated + with the deal's current state if an error occurred. + type: string + status: + allOf: + - $ref: '#/definitions/mk20.DealState' + description: State indicates the current processing state of the deal as a + DealState value. + type: object + mk20.FormatAggregate: + properties: + sub: + description: |- + Sub holds a slice of DataSource, representing details of sub pieces aggregated under this format. + The order must be same as segment index to avoid incorrect indexing of sub pieces in an aggregate + items: + $ref: '#/definitions/mk20.DataSource' + type: array + type: + allOf: + - $ref: '#/definitions/mk20.AggregateType' + description: Type specifies the type of aggregation for data pieces, represented + by an AggregateType value. + type: object + mk20.FormatBytes: + type: object + mk20.FormatCar: + type: object + mk20.HttpUrl: + properties: + fallback: + description: Fallback indicates whether this URL serves as a fallback option + when other URLs fail. + type: boolean + headers: + allOf: + - $ref: '#/definitions/http.Header' + description: HTTPHeaders represents the HTTP headers associated with the URL. + priority: + description: Priority indicates the order preference for using the URL in + requests, with lower values having higher priority. + type: integer + url: + description: URL specifies the HTTP endpoint where the piece data can be fetched. + type: string + type: object + mk20.PDPV1: + properties: + add_piece: + description: AddPiece indicated that this deal is meant to add Piece to a + given DataSet. DataSetID must be defined. + type: boolean + create_data_set: + description: CreateDataSet indicated that this deal is meant to create a new + DataSet for the client by storage provider. + type: boolean + data_set_id: + description: DataSetID is PDP verified contract dataset ID. It must be defined + for all deals except when CreateDataSet is true. + type: integer + delete_data_set: + description: |- + DeleteDataSet indicated that this deal is meant to delete an existing DataSet created by SP for the client. + DataSetID must be defined. + type: boolean + delete_piece: + description: DeletePiece indicates whether the Piece of the data should be + deleted. DataSetID must be defined. + type: boolean + extra_data: + description: ExtraData can be used to send additional information to service + contract when Verifier action like AddRoot, DeleteRoot etc. are performed. + items: + type: integer + type: array + piece_ids: + description: PieceIDs is a list of Piece ids in a proof set. + items: + type: integer + type: array + record_keeper: + description: RecordKeeper specifies the record keeper contract address for + the new PDP dataset. + type: string + type: object + mk20.PieceDataFormat: + properties: + aggregate: + allOf: + - $ref: '#/definitions/mk20.FormatAggregate' + description: Aggregate holds a reference to the aggregated format of piece + data. + car: + allOf: + - $ref: '#/definitions/mk20.FormatCar' + description: Car represents the optional CAR file format. + raw: + allOf: + - $ref: '#/definitions/mk20.FormatBytes' + description: Raw represents the raw format of the piece data, encapsulated + as bytes. + type: object + mk20.Products: + properties: + ddo_v1: + allOf: + - $ref: '#/definitions/mk20.DDOV1' + description: DDOV1 represents a product v1 configuration for Direct Data Onboarding + (DDO) + pdp_v1: + allOf: + - $ref: '#/definitions/mk20.PDPV1' + description: PDPV1 represents product-specific configuration for PDP version + 1 deals. + retrieval_v1: + allOf: + - $ref: '#/definitions/mk20.RetrievalV1' + description: RetrievalV1 represents configuration for retrieval settings in + the system, including indexing and announcement flags. + type: object + mk20.RetrievalV1: + properties: + announce_payload: + description: AnnouncePayload indicates whether the payload should be announced + to IPNI. + type: boolean + announce_piece: + description: AnnouncePiece indicates whether the piece information should + be announced to IPNI. + type: boolean + indexing: + description: Indexing indicates if the deal is to be indexed in the provider's + system to support CIDs based retrieval + type: boolean + type: object + mk20.StartUpload: + properties: + chunk_size: + description: ChunkSize defines the size of each data chunk to be used during + the upload process. + type: integer + raw_size: + description: RawSize indicates the total size of the data to be uploaded in + bytes. + type: integer + type: object + mk20.SupportedContracts: + properties: + contracts: + description: Contracts represents a list of supported contract addresses in + string format. + items: + type: string + type: array + type: object + mk20.SupportedDataSources: + properties: + sources: + description: Contracts represents a list of supported contract addresses in + string format. + items: + type: string + type: array + type: object + mk20.SupportedProducts: + properties: + products: + description: Contracts represents a list of supported contract addresses in + string format. + items: + type: string + type: array + type: object + mk20.UploadCode: + enum: + - 200 + - 400 + - 404 + - 409 + - 500 + - 429 + type: integer + x-enum-varnames: + - UploadOk + - UploadBadRequest + - UploadNotFound + - UploadChunkAlreadyUploaded + - UploadServerError + - UploadRateLimit + mk20.UploadStartCode: + enum: + - 200 + - 400 + - 404 + - 409 + - 500 + type: integer + x-enum-varnames: + - UploadStartCodeOk + - UploadStartCodeBadRequest + - UploadStartCodeDealNotFound + - UploadStartCodeAlreadyStarted + - UploadStartCodeServerError + mk20.UploadStatus: + properties: + missing: + description: Missing represents the number of chunks that are not yet uploaded. + type: integer + missing_chunks: + description: MissingChunks is a slice containing the indices of missing chunks. + items: + type: integer + type: array + total_chunks: + description: TotalChunks represents the total number of chunks required for + the upload. + type: integer + uploaded: + description: Uploaded represents the number of chunks successfully uploaded. + type: integer + uploaded_chunks: + description: UploadedChunks is a slice containing the indices of successfully + uploaded chunks. + items: + type: integer + type: array + type: object + mk20.UploadStatusCode: + enum: + - 200 + - 404 + - 425 + - 500 + type: integer + x-enum-varnames: + - UploadStatusCodeOk + - UploadStatusCodeDealNotFound + - UploadStatusCodeUploadNotStarted + - UploadStatusCodeServerError +info: + contact: {} + description: Curio market APIs + title: Curio Market 2.0 API +paths: + /contracts: + get: + description: List of supported DDO contracts + responses: + "200": + description: Array of contract addresses supported by a system or application. + schema: + $ref: '#/definitions/mk20.SupportedContracts' + "500": + description: Internal Server Error + schema: + type: string + summary: List of supported DDO contracts + /info/: + get: + description: '- OpenAPI spec UI for the Market 2.0 APIs' + responses: {} + summary: OpenAPI Spec UI + /info/swagger.json: + get: + description: '- OpenAPI spec for the Market 2.0 APIs in JSON format' + responses: {} + summary: OpenAPI Spec JSON + /info/swagger.yaml: + get: + description: '- OpenAPI spec for the Market 2.0 APIs in YAML format' + responses: {} + summary: OpenAPI Spec YAML + /products: + get: + description: List of supported products + responses: + "200": + description: Array of products supported by the SP + schema: + $ref: '#/definitions/mk20.SupportedProducts' + "500": + description: Internal Server Error + schema: + type: string + summary: List of supported products + /sources: + get: + description: List of supported data sources + responses: + "200": + description: Array of dats sources supported by the SP + schema: + $ref: '#/definitions/mk20.SupportedDataSources' + "500": + description: Internal Server Error + schema: + type: string + summary: List of supported data sources + /status/{id}: + get: + description: Current status of MK20 deal per product + parameters: + - description: id + in: path + name: id + required: true + type: string + responses: + "200": + description: the status response for deal products with their respective + deal statuses + schema: + $ref: '#/definitions/mk20.DealProductStatusResponse' + "400": + description: Bad Request - Invalid input or validation error + schema: + type: string + "500": + description: Internal Server Error + schema: + type: string + security: + - CurioAuth: [] + summary: Status of the MK20 deal + /store: + post: + consumes: + - application/json + description: Make a mk20 deal + parameters: + - description: mk20.Deal in json format + in: body + name: body + required: true + schema: + $ref: '#/definitions/mk20.Deal' + responses: + "200": + description: Ok represents a successful operation with an HTTP status code + of 200 + schema: + $ref: '#/definitions/mk20.DealCode' + "400": + description: Bad Request - Invalid input or validation error + schema: + type: string + "404": + description: ErrDealNotFound indicates that the specified deal could not + be found, corresponding to the HTTP status code 404 + schema: + $ref: '#/definitions/mk20.DealCode' + "422": + description: ErrUnsupportedDataSource indicates the specified data source + is not supported or disabled for use in the current context + schema: + $ref: '#/definitions/mk20.DealCode' + "423": + description: ErrUnsupportedProduct indicates that the requested product + is not supported by the provider + schema: + $ref: '#/definitions/mk20.DealCode' + "424": + description: ErrProductNotEnabled indicates that the requested product is + not enabled on the provider + schema: + $ref: '#/definitions/mk20.DealCode' + "425": + description: ErrProductValidationFailed indicates a failure during product-specific + validation due to invalid or missing data + schema: + $ref: '#/definitions/mk20.DealCode' + "426": + description: ErrDealRejectedByMarket indicates that a proposed deal was + rejected by the market for not meeting its acceptance criteria or rules + schema: + $ref: '#/definitions/mk20.DealCode' + "429": + description: ErrServiceOverloaded indicates that the service is overloaded + and cannot process the request at the moment + schema: + $ref: '#/definitions/mk20.DealCode' + "430": + description: ErrMalformedDataSource indicates that the provided data source + is incorrectly formatted or contains invalid data + schema: + $ref: '#/definitions/mk20.DealCode' + "440": + description: ErrMarketNotEnabled indicates that the market is not enabled + for the requested operation + schema: + $ref: '#/definitions/mk20.DealCode' + "441": + description: ErrDurationTooShort indicates that the provided duration value + does not meet the minimum required threshold + schema: + $ref: '#/definitions/mk20.DealCode' + "500": + description: ErrServerInternalError indicates an internal server error with + a corresponding error code of 500 + schema: + $ref: '#/definitions/mk20.DealCode' + "503": + description: ErrServiceMaintenance indicates that the service is temporarily + unavailable due to maintenance, corresponding to HTTP status code 503 + schema: + $ref: '#/definitions/mk20.DealCode' + security: + - CurioAuth: [] + summary: Make a mk20 deal + /update/{id}: + get: + consumes: + - application/json + description: Useful for adding adding additional products and updating PoRep + duration + parameters: + - description: id + in: path + name: id + required: true + type: string + - description: mk20.Deal in json format + in: body + name: body + required: true + schema: + $ref: '#/definitions/mk20.Deal' + responses: + "200": + description: Ok represents a successful operation with an HTTP status code + of 200 + schema: + $ref: '#/definitions/mk20.DealCode' + "400": + description: Bad Request - Invalid input or validation error + schema: + type: string + "404": + description: ErrDealNotFound indicates that the specified deal could not + be found, corresponding to the HTTP status code 404 + schema: + $ref: '#/definitions/mk20.DealCode' + "422": + description: ErrUnsupportedDataSource indicates the specified data source + is not supported or disabled for use in the current context + schema: + $ref: '#/definitions/mk20.DealCode' + "423": + description: ErrUnsupportedProduct indicates that the requested product + is not supported by the provider + schema: + $ref: '#/definitions/mk20.DealCode' + "424": + description: ErrProductNotEnabled indicates that the requested product is + not enabled on the provider + schema: + $ref: '#/definitions/mk20.DealCode' + "425": + description: ErrProductValidationFailed indicates a failure during product-specific + validation due to invalid or missing data + schema: + $ref: '#/definitions/mk20.DealCode' + "426": + description: ErrDealRejectedByMarket indicates that a proposed deal was + rejected by the market for not meeting its acceptance criteria or rules + schema: + $ref: '#/definitions/mk20.DealCode' + "429": + description: ErrServiceOverloaded indicates that the service is overloaded + and cannot process the request at the moment + schema: + $ref: '#/definitions/mk20.DealCode' + "430": + description: ErrMalformedDataSource indicates that the provided data source + is incorrectly formatted or contains invalid data + schema: + $ref: '#/definitions/mk20.DealCode' + "440": + description: ErrMarketNotEnabled indicates that the market is not enabled + for the requested operation + schema: + $ref: '#/definitions/mk20.DealCode' + "441": + description: ErrDurationTooShort indicates that the provided duration value + does not meet the minimum required threshold + schema: + $ref: '#/definitions/mk20.DealCode' + "500": + description: ErrServerInternalError indicates an internal server error with + a corresponding error code of 500 + schema: + $ref: '#/definitions/mk20.DealCode' + "503": + description: ErrServiceMaintenance indicates that the service is temporarily + unavailable due to maintenance, corresponding to HTTP status code 503 + schema: + $ref: '#/definitions/mk20.DealCode' + security: + - CurioAuth: [] + summary: Update the deal details of existing deals. + /upload/{id}: + post: + consumes: + - application/json + description: Finalizes the serial upload process once data has been uploaded + parameters: + - description: id + in: path + name: id + required: true + type: string + - description: mk20.deal in json format + in: body + name: body + schema: + $ref: '#/definitions/mk20.Deal' + responses: + "200": + description: Ok represents a successful operation with an HTTP status code + of 200 + schema: + $ref: '#/definitions/mk20.DealCode' + "400": + description: Bad Request - Invalid input or validation error + schema: + type: string + "404": + description: ErrDealNotFound indicates that the specified deal could not + be found, corresponding to the HTTP status code 404 + schema: + $ref: '#/definitions/mk20.DealCode' + "422": + description: ErrUnsupportedDataSource indicates the specified data source + is not supported or disabled for use in the current context + schema: + $ref: '#/definitions/mk20.DealCode' + "423": + description: ErrUnsupportedProduct indicates that the requested product + is not supported by the provider + schema: + $ref: '#/definitions/mk20.DealCode' + "424": + description: ErrProductNotEnabled indicates that the requested product is + not enabled on the provider + schema: + $ref: '#/definitions/mk20.DealCode' + "425": + description: ErrProductValidationFailed indicates a failure during product-specific + validation due to invalid or missing data + schema: + $ref: '#/definitions/mk20.DealCode' + "426": + description: ErrDealRejectedByMarket indicates that a proposed deal was + rejected by the market for not meeting its acceptance criteria or rules + schema: + $ref: '#/definitions/mk20.DealCode' + "429": + description: ErrServiceOverloaded indicates that the service is overloaded + and cannot process the request at the moment + schema: + $ref: '#/definitions/mk20.DealCode' + "430": + description: ErrMalformedDataSource indicates that the provided data source + is incorrectly formatted or contains invalid data + schema: + $ref: '#/definitions/mk20.DealCode' + "440": + description: ErrMarketNotEnabled indicates that the market is not enabled + for the requested operation + schema: + $ref: '#/definitions/mk20.DealCode' + "441": + description: ErrDurationTooShort indicates that the provided duration value + does not meet the minimum required threshold + schema: + $ref: '#/definitions/mk20.DealCode' + "500": + description: ErrServerInternalError indicates an internal server error with + a corresponding error code of 500 + schema: + $ref: '#/definitions/mk20.DealCode' + "503": + description: ErrServiceMaintenance indicates that the service is temporarily + unavailable due to maintenance, corresponding to HTTP status code 503 + schema: + $ref: '#/definitions/mk20.DealCode' + security: + - CurioAuth: [] + summary: Finalizes the serial upload process + put: + description: Allows uploading data for deals in a single stream. Suitable for + small deals. + parameters: + - description: id + in: path + name: id + required: true + type: string + - description: raw binary + in: body + name: body + required: true + schema: + items: + type: integer + type: array + responses: + "200": + description: UploadOk indicates a successful upload operation, represented + by the HTTP status code 200 + schema: + $ref: '#/definitions/mk20.UploadCode' + "400": + description: Bad Request - Invalid input or validation error + schema: + type: string + "404": + description: UploadStartCodeDealNotFound represents a 404 status indicating + the deal was not found during the upload start process + schema: + $ref: '#/definitions/mk20.UploadStartCode' + "500": + description: UploadServerError indicates a server-side error occurred during + the upload process, represented by the HTTP status code 500 + schema: + $ref: '#/definitions/mk20.UploadCode' + security: + - CurioAuth: [] + summary: Upload the deal data + /uploads/{id}: + get: + description: Return a json struct detailing the current status of a deal upload. + parameters: + - description: id + in: path + name: id + required: true + type: string + responses: + "200": + description: The status of a file upload process, including progress and + missing chunks + schema: + $ref: '#/definitions/mk20.UploadStatus' + "400": + description: Bad Request - Invalid input or validation error + schema: + type: string + "404": + description: UploadStatusCodeDealNotFound indicates that the requested deal + was not found, corresponding to status code 404 + schema: + $ref: '#/definitions/mk20.UploadStatusCode' + "425": + description: UploadStatusCodeUploadNotStarted indicates that the upload + process has not started yet + schema: + $ref: '#/definitions/mk20.UploadStatusCode' + "500": + description: UploadStatusCodeServerError indicates an internal server error + occurred during the upload process, corresponding to status code 500 + schema: + $ref: '#/definitions/mk20.UploadStatusCode' + security: + - CurioAuth: [] + summary: Status of deal upload + post: + consumes: + - application/json + description: Initializes the upload for a deal. Each upload must be initialized + before chunks can be uploaded for a deal. + parameters: + - description: id + in: path + name: id + required: true + type: string + - description: Metadata for initiating an upload operation + in: body + name: data + required: true + schema: + $ref: '#/definitions/mk20.StartUpload' + responses: + "200": + description: UploadStartCodeOk indicates a successful upload start request + with status code 200 + schema: + $ref: '#/definitions/mk20.UploadStartCode' + "400": + description: Bad Request - Invalid input or validation error + schema: + type: string + "404": + description: UploadStartCodeDealNotFound represents a 404 status indicating + the deal was not found during the upload start process + schema: + $ref: '#/definitions/mk20.UploadStartCode' + "409": + description: UploadStartCodeAlreadyStarted indicates that the upload process + has already been initiated and cannot be started again + schema: + $ref: '#/definitions/mk20.UploadStartCode' + "500": + description: UploadStartCodeServerError indicates an error occurred on the + server while processing an upload start request + schema: + $ref: '#/definitions/mk20.UploadStartCode' + security: + - CurioAuth: [] + summary: Starts the upload process + /uploads/{id}/{chunkNum}: + put: + description: Allows uploading chunks for a deal file. Method can be called in + parallel to speed up uploads. + parameters: + - description: id + in: path + name: id + required: true + type: string + - description: chunkNum + in: path + name: chunkNum + required: true + type: string + - description: raw binary + in: body + name: data + required: true + schema: + items: + type: integer + type: array + responses: + "200": + description: UploadOk indicates a successful upload operation, represented + by the HTTP status code 200 + schema: + $ref: '#/definitions/mk20.UploadCode' + "400": + description: Bad Request - Invalid input or validation error + schema: + type: string + "404": + description: UploadNotFound represents an error where the requested upload + chunk could not be found, typically corresponding to HTTP status 404 + schema: + $ref: '#/definitions/mk20.UploadCode' + "409": + description: UploadChunkAlreadyUploaded indicates that the chunk has already + been uploaded and cannot be re-uploaded + schema: + $ref: '#/definitions/mk20.UploadCode' + "500": + description: UploadServerError indicates a server-side error occurred during + the upload process, represented by the HTTP status code 500 + schema: + $ref: '#/definitions/mk20.UploadCode' + security: + - CurioAuth: [] + summary: Upload a file chunk + /uploads/finalize/{id}: + post: + consumes: + - application/json + description: Finalizes the upload process once all the chunks are uploaded. + parameters: + - description: id + in: path + name: id + required: true + type: string + - description: mk20.deal in json format + in: body + name: body + schema: + $ref: '#/definitions/mk20.Deal' + responses: + "200": + description: Ok represents a successful operation with an HTTP status code + of 200 + schema: + $ref: '#/definitions/mk20.DealCode' + "400": + description: Bad Request - Invalid input or validation error + schema: + type: string + "404": + description: ErrDealNotFound indicates that the specified deal could not + be found, corresponding to the HTTP status code 404 + schema: + $ref: '#/definitions/mk20.DealCode' + "422": + description: ErrUnsupportedDataSource indicates the specified data source + is not supported or disabled for use in the current context + schema: + $ref: '#/definitions/mk20.DealCode' + "423": + description: ErrUnsupportedProduct indicates that the requested product + is not supported by the provider + schema: + $ref: '#/definitions/mk20.DealCode' + "424": + description: ErrProductNotEnabled indicates that the requested product is + not enabled on the provider + schema: + $ref: '#/definitions/mk20.DealCode' + "425": + description: ErrProductValidationFailed indicates a failure during product-specific + validation due to invalid or missing data + schema: + $ref: '#/definitions/mk20.DealCode' + "426": + description: ErrDealRejectedByMarket indicates that a proposed deal was + rejected by the market for not meeting its acceptance criteria or rules + schema: + $ref: '#/definitions/mk20.DealCode' + "429": + description: ErrServiceOverloaded indicates that the service is overloaded + and cannot process the request at the moment + schema: + $ref: '#/definitions/mk20.DealCode' + "430": + description: ErrMalformedDataSource indicates that the provided data source + is incorrectly formatted or contains invalid data + schema: + $ref: '#/definitions/mk20.DealCode' + "440": + description: ErrMarketNotEnabled indicates that the market is not enabled + for the requested operation + schema: + $ref: '#/definitions/mk20.DealCode' + "441": + description: ErrDurationTooShort indicates that the provided duration value + does not meet the minimum required threshold + schema: + $ref: '#/definitions/mk20.DealCode' + "500": + description: ErrServerInternalError indicates an internal server error with + a corresponding error code of 500 + schema: + $ref: '#/definitions/mk20.DealCode' + "503": + description: ErrServiceMaintenance indicates that the service is temporarily + unavailable due to maintenance, corresponding to HTTP status code 503 + schema: + $ref: '#/definitions/mk20.DealCode' + security: + - CurioAuth: [] + summary: Finalizes the upload process +securityDefinitions: + CurioAuth: + description: |- + Use the format: `CurioAuth PublicKeyType:PublicKey:Signature` + + - `PublicKeyType`: String representation of type of wallet (e.g., "ed25519", "bls", "secp256k1") + - `PublicKey`: Base64 string of public key bytes + - `Signature`: Signature is Base64 string of signature bytes. + - The client is expected to sign the SHA-256 hash of a message constructed by concatenating the following components, in order. + - The raw public key bytes (not a human-readable address) + - The timestamp, truncated to the nearest hour, formatted in RFC3339 (e.g., 2025-07-15T17:00:00Z) + - These two byte slices are joined without any delimiter between them, and the resulting byte array is then hashed using SHA-256. The signature is performed on that hash. + in: header + name: Authorization + type: apiKey +swagger: "2.0" diff --git a/market/mk20/mk20.go b/market/mk20/mk20.go new file mode 100644 index 000000000..59e7337e6 --- /dev/null +++ b/market/mk20/mk20.go @@ -0,0 +1,1011 @@ +package mk20 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/oklog/ulid" + "github.com/samber/lo" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v16/miner" + "github.com/filecoin-project/go-state-types/builtin/v16/verifreg" + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + + "github.com/filecoin-project/curio/build" + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/multictladdr" + "github.com/filecoin-project/curio/lib/paths" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" +) + +var log = logging.Logger("mk20") + +type MK20API interface { + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifreg9.AllocationId, tsk types.TipSetKey) (*verifreg9.Allocation, error) +} + +type MK20 struct { + miners []address.Address + DB *harmonydb.DB + api MK20API + ethClient *ethclient.Client + si paths.SectorIndex + cfg *config.CurioConfig + sm map[address.Address]abi.SectorSize + as *multictladdr.MultiAddressSelector + sc *ffi.SealCalls + maxParallelUploads *atomic.Int64 + unknowClient bool +} + +func NewMK20Handler(miners []address.Address, db *harmonydb.DB, si paths.SectorIndex, mapi MK20API, ethClient *ethclient.Client, cfg *config.CurioConfig, as *multictladdr.MultiAddressSelector, sc *ffi.SealCalls) (*MK20, error) { + ctx := context.Background() + + // Ensure MinChunk size and max chunkSize is a power of 2 + if cfg.Market.StorageMarketConfig.MK20.MinimumChunkSize&(cfg.Market.StorageMarketConfig.MK20.MinimumChunkSize-1) != 0 { + return nil, xerrors.Errorf("MinimumChunkSize must be a power of 2") + } + + if cfg.Market.StorageMarketConfig.MK20.MaximumChunkSize&(cfg.Market.StorageMarketConfig.MK20.MaximumChunkSize-1) != 0 { + return nil, xerrors.Errorf("MaximumChunkSize must be a power of 2") + } + + sm := make(map[address.Address]abi.SectorSize) + + for _, m := range miners { + info, err := mapi.StateMinerInfo(ctx, m, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("getting miner info: %w", err) + } + if _, ok := sm[m]; !ok { + sm[m] = info.SectorSize + } + } + + go markDownloaded(ctx, db) + go removeNotFinalizedUploads(ctx, db) + + return &MK20{ + miners: miners, + DB: db, + api: mapi, + ethClient: ethClient, + si: si, + cfg: cfg, + sm: sm, + as: as, + sc: sc, + maxParallelUploads: new(atomic.Int64), + unknowClient: !cfg.Market.StorageMarketConfig.MK20.DenyUnknownClients, + }, nil +} + +// ExecuteDeal take a *Deal and returns ProviderDealRejectionInfo which has ErrorCode and Reason +// @param deal *Deal +// @Return DealCode +// @Return Reason string + +func (m *MK20) ExecuteDeal(ctx context.Context, deal *Deal, auth string) *ProviderDealRejectionInfo { + defer func() { + if r := recover(); r != nil { + trace := make([]byte, 1<<16) + n := runtime.Stack(trace, false) + log.Errorf("panic occurred: %v\n%s", r, trace[:n]) + debug.PrintStack() + } + }() + + // Validate the DataSource + code, err := deal.Validate(m.DB, &m.cfg.Market.StorageMarketConfig.MK20, auth) + if err != nil { + log.Errorw("deal rejected", "deal", deal, "error", err) + ret := &ProviderDealRejectionInfo{ + HTTPCode: code, + } + if code == ErrServerInternalError { + ret.Reason = "Internal server error" + } else { + ret.Reason = err.Error() + } + return ret + } + + log.Debugw("deal validated", "deal", deal.Identifier.String()) + + if deal.Products.DDOV1 != nil { + // TODO: Remove this check once DDO market is done + if build.BuildType == build.Build2k || build.BuildType == build.BuildDebug { + return m.processDDODeal(ctx, deal, nil) + } + log.Errorw("DDOV1 is not supported yet", "deal", deal.Identifier.String()) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrUnsupportedProduct, + Reason: "DDOV1 is not supported yet", + } + } + + if deal.Products.PDPV1 != nil { + return m.processPDPDeal(ctx, deal) + } + + return &ProviderDealRejectionInfo{ + HTTPCode: ErrUnsupportedProduct, + Reason: "Unsupported product", + } +} + +func (m *MK20) processDDODeal(ctx context.Context, deal *Deal, tx *harmonydb.Tx) *ProviderDealRejectionInfo { + rejection, err := m.sanitizeDDODeal(ctx, deal) + if err != nil { + log.Errorw("deal rejected", "deal", deal, "error", err) + return rejection + } + + log.Debugw("deal sanitized", "deal", deal.Identifier.String()) + + if rejection != nil { + return rejection + } + + id, code, err := deal.Products.DDOV1.GetDealID(ctx, m.DB, m.ethClient) + if err != nil { + log.Errorw("error getting deal ID", "deal", deal, "error", err) + ret := &ProviderDealRejectionInfo{ + HTTPCode: code, + } + if code == ErrServerInternalError { + ret.Reason = "Internal server error" + } else { + ret.Reason = err.Error() + } + return ret + } + + log.Debugw("deal ID found", "deal", deal.Identifier.String(), "id", id) + + // TODO: Backpressure, client filter + + process := func(tx *harmonydb.Tx) error { + err = deal.SaveToDB(tx) + if err != nil { + return err + } + n, err := tx.Exec(`UPDATE market_mk20_deal + SET ddo_v1 = jsonb_set(ddo_v1, '{deal_id}', to_jsonb($1::bigint)) + WHERE id = $2;`, id, deal.Identifier.String()) + if err != nil { + return err + } + if n != 1 { + return fmt.Errorf("expected 1 row to be updated, got %d", n) + } + + // Assume upload if no data source defined + if deal.Data == nil { + _, err = tx.Exec(`INSERT INTO market_mk20_upload_waiting (id) VALUES ($1) ON CONFLICT (id) DO NOTHING`, deal.Identifier.String()) + } else { + if deal.Data.SourceHttpPut != nil { + _, err = tx.Exec(`INSERT INTO market_mk20_upload_waiting (id) VALUES ($1) ON CONFLICT (id) DO NOTHING`, deal.Identifier.String()) + } else { + // All deals which are not upload should be entered in market_mk20_pipeline_waiting for further processing. + _, err = tx.Exec(`INSERT INTO market_mk20_pipeline_waiting (id) VALUES ($1) ON CONFLICT (id) DO NOTHING`, deal.Identifier.String()) + } + } + + if err != nil { + return xerrors.Errorf("adding deal to waiting pipeline: %w", err) + } + return nil + } + + if tx != nil { + err := process(tx) + if err != nil { + log.Errorw("error inserting deal into DB", "deal", deal, "error", err) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + } + } + } else { + comm, err := m.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + err = process(tx) + if err != nil { + return false, err + } + return true, nil + }) + + if err != nil { + log.Errorw("error inserting deal into DB", "deal", deal, "error", err) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + } + } + + if !comm { + log.Errorw("error committing deal into DB", "deal", deal) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + } + } + } + + log.Debugw("deal inserted in DB", "deal", deal.Identifier.String()) + + return &ProviderDealRejectionInfo{ + HTTPCode: Ok, + } +} + +func (m *MK20) sanitizeDDODeal(ctx context.Context, deal *Deal) (*ProviderDealRejectionInfo, error) { + if !lo.Contains(m.miners, deal.Products.DDOV1.Provider) { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Provider not available in Curio cluster", + }, nil + } + + if deal.Data == nil { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Data Source must be defined for a DDO deal", + }, nil + } + + if deal.Products.RetrievalV1 == nil { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Retrieval product must be defined for a DDO deal", + }, nil + } + + if deal.Products.RetrievalV1.AnnouncePiece { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Piece cannot be announced for a DDO deal", + }, nil + } + + size, err := deal.Size() + if err != nil { + log.Errorw("error getting deal size", "deal", deal, "error", err) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Error getting deal size from PieceCID", + }, nil + } + + if size > abi.PaddedPieceSize(m.sm[deal.Products.DDOV1.Provider]) { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Deal size is larger than the miner's sector size", + }, nil + } + + if deal.Data.Format.Raw != nil { + if deal.Products.RetrievalV1 != nil { + if deal.Products.RetrievalV1.Indexing { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Raw bytes deal cannot be indexed", + }, nil + } + } + } + + if deal.Products.DDOV1.AllocationId != nil { + if size < abi.PaddedPieceSize(verifreg.MinimumVerifiedAllocationSize) { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Verified piece size must be at least 1MB", + }, nil + } + + client, err := address.NewFromString(deal.Client) + if err != nil { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Client address is not valid", + }, nil + } + + alloc, err := m.api.StateGetAllocation(ctx, client, verifreg9.AllocationId(*deal.Products.DDOV1.AllocationId), types.EmptyTSK) + if err != nil { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + }, xerrors.Errorf("getting allocation: %w", err) + } + + if alloc == nil { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Verified piece must have a valid allocation ID", + }, nil + } + + clientID, err := address.IDFromAddress(client) + if err != nil { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Invalid client address", + }, nil + } + + if alloc.Client != abi.ActorID(clientID) { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "client address does not match the allocation client address", + }, nil + } + + prov, err := address.NewIDAddress(uint64(alloc.Provider)) + if err != nil { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + }, xerrors.Errorf("getting provider address: %w", err) + } + + if !lo.Contains(m.miners, prov) { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Allocation provider does not belong to the list of miners in Curio cluster", + }, nil + } + + if !deal.Data.PieceCID.Equals(alloc.Data) { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Allocation data CID does not match the piece CID", + }, nil + } + + if size != alloc.Size { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Allocation size does not match the piece size", + }, nil + } + + if alloc.TermMin > miner.MaxSectorExpirationExtension-policy.SealRandomnessLookback { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Allocation term min is greater than the maximum sector expiration extension", + }, nil + } + } + + return nil, nil +} + +func (m *MK20) processPDPDeal(ctx context.Context, deal *Deal) *ProviderDealRejectionInfo { + defer func() { + if r := recover(); r != nil { + trace := make([]byte, 1<<16) + n := runtime.Stack(trace, false) + log.Errorf("panic occurred in PDP: %v\n%s", r, trace[:n]) + debug.PrintStack() + } + }() + + rejection, err := m.sanitizePDPDeal(ctx, deal) + if err != nil { + log.Errorw("PDP deal rejected", "deal", deal, "error", err) + return rejection + } + + log.Debugw("PDP deal sanitized", "deal", deal.Identifier.String()) + + if rejection != nil { + return rejection + } + + // Save deal to DB and start pipeline if required + comm, err := m.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + // Save deal + err = deal.SaveToDB(tx) + if err != nil { + return false, xerrors.Errorf("saving deal to DB: %w", err) + } + + pdp := deal.Products.PDPV1 + + if pdp.AddPiece { + // If we have data source other that PUT then start the pipeline + if deal.Data != nil { + if deal.Data.SourceHTTP != nil || deal.Data.SourceAggregate != nil { + err = insertPDPPipeline(ctx, tx, deal) + if err != nil { + return false, xerrors.Errorf("inserting pipeline: %w", err) + } + } + if deal.Data.SourceHttpPut != nil { + _, err = tx.Exec(`INSERT INTO market_mk20_upload_waiting (id) VALUES ($1) ON CONFLICT (id) DO NOTHING`, deal.Identifier.String()) + if err != nil { + return false, xerrors.Errorf("inserting upload waiting: %w", err) + } + } + } else { + // Assume upload + _, err = tx.Exec(`INSERT INTO market_mk20_upload_waiting (id) VALUES ($1) ON CONFLICT (id) DO NOTHING`, deal.Identifier.String()) + if err != nil { + return false, xerrors.Errorf("inserting upload waiting: %w", err) + } + } + } + + if pdp.CreateDataSet { + n, err := m.DB.Exec(ctx, `INSERT INTO pdp_data_set_create (id, client, record_keeper, extra_data) VALUES ($1, $2, $3, $4)`, + deal.Identifier.String(), deal.Client, pdp.RecordKeeper, pdp.ExtraData) + if err != nil { + return false, xerrors.Errorf("inserting PDP proof set create: %w", err) + } + if n != 1 { + return false, fmt.Errorf("expected 1 row to be updated, got %d", n) + } + } + + if pdp.DeleteDataSet { + n, err := m.DB.Exec(ctx, `INSERT INTO pdp_data_set_delete (id, client, set_id, extra_data) VALUES ($1, $2, $3, $4)`, + deal.Identifier.String(), deal.Client, *pdp.DataSetID, pdp.ExtraData) + if err != nil { + return false, xerrors.Errorf("inserting PDP proof set delete: %w", err) + } + if n != 1 { + return false, fmt.Errorf("expected 1 row to be updated, got %d", n) + } + } + + if pdp.DeletePiece { + n, err := m.DB.Exec(ctx, `INSERT INTO pdp_piece_delete (id, client, set_id, pieces, extra_data) VALUES ($1, $2, $3, $4, $5)`, + deal.Identifier.String(), deal.Client, *pdp.DataSetID, pdp.PieceIDs, pdp.ExtraData) + if err != nil { + return false, xerrors.Errorf("inserting PDP delete root: %w", err) + } + if n != 1 { + return false, fmt.Errorf("expected 1 row to be updated, got %d", n) + } + } + + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + log.Errorw("error inserting PDP deal into DB", "deal", deal, "error", err) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + } + } + if !comm { + log.Errorw("error committing PDP deal into DB", "deal", deal) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + } + } + log.Debugw("PDP deal inserted in DB", "deal", deal.Identifier.String()) + return &ProviderDealRejectionInfo{ + HTTPCode: Ok, + } +} + +func (m *MK20) sanitizePDPDeal(ctx context.Context, deal *Deal) (*ProviderDealRejectionInfo, error) { + if deal.Products.PDPV1.AddPiece && deal.Products.RetrievalV1 == nil { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Retrieval deal is required for pdp_v1", + }, nil + } + + if deal.Data != nil { + if deal.Data.SourceOffline != nil { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Offline data source is not supported for pdp_v1", + }, nil + } + + if deal.Data.Format.Raw != nil && deal.Products.RetrievalV1.AnnouncePayload { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "Raw bytes deal cannot be announced to IPNI", + }, nil + } + } + + p := deal.Products.PDPV1 + + // This serves as Auth for now. We are checking if client is authorized to make changes to the proof set or pieces + // In future this will be replaced by an ACL check + + if p.DeleteDataSet || p.AddPiece { + pid := *p.DataSetID + var exists bool + err := m.DB.QueryRow(ctx, `SELECT EXISTS(SELECT 1 FROM pdp_data_set WHERE id = $1 AND removed = FALSE AND client = $2)`, pid, deal.Client).Scan(&exists) + if err != nil { + log.Errorw("error checking if proofset exists", "error", err) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + Reason: "", + }, nil + } + if !exists { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "proofset does not exist for the client", + }, nil + } + } + + if p.DeletePiece { + pid := *p.DataSetID + var exists bool + err := m.DB.QueryRow(ctx, `SELECT COUNT(*) = cardinality($2::BIGINT[]) AS all_exist_and_active + FROM pdp_dataset_piece r + JOIN pdp_data_set s ON r.data_set_id = s.id + WHERE r.data_set_id = $1 + AND r.piece = ANY($2) + AND r.removed = FALSE + AND s.removed = FALSE + AND r.client = $3 + AND s.client = $3;`, pid, p.PieceIDs, deal.Client).Scan(&exists) + if err != nil { + log.Errorw("error checking if dataset and pieces exist for the client", "error", err) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + Reason: "", + }, nil + + } + if !exists { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "dataset or one of the pieces does not exist for the client", + }, nil + } + } + + return nil, nil +} + +func insertPDPPipeline(ctx context.Context, tx *harmonydb.Tx, deal *Deal) error { + pdp := deal.Products.PDPV1 + retv := deal.Products.RetrievalV1 + data := deal.Data + dealID := deal.Identifier.String() + pi, err := deal.PieceInfo() + if err != nil { + return fmt.Errorf("getting piece info: %w", err) + } + + aggregation := 0 + if data.Format.Aggregate != nil { + aggregation = int(data.Format.Aggregate.Type) + } + + // Insert pipeline when Data source is HTTP + if data.SourceHTTP != nil { + var pieceID int64 + // Attempt to select the piece ID first + err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1 AND piece_padded_size = $2`, pi.PieceCIDV1.String(), pi.Size).Scan(&pieceID) + + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + // Piece does not exist, attempt to insert + err = tx.QueryRow(` + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term) + VALUES ($1, $2, $3, TRUE) + ON CONFLICT (piece_cid, piece_padded_size, long_term, cleanup_task_id) DO NOTHING + RETURNING id`, pi.PieceCIDV1.String(), pi.Size, pi.RawSize).Scan(&pieceID) + if err != nil { + return xerrors.Errorf("inserting new parked piece and getting id: %w", err) + } + } else { + // Some other error occurred during select + return xerrors.Errorf("checking existing parked piece: %w", err) + } + } + + var refIds []int64 + + // Add parked_piece_refs + for _, src := range data.SourceHTTP.URLs { + var refID int64 + + headers, err := json.Marshal(src.Headers) + if err != nil { + return xerrors.Errorf("marshaling headers: %w", err) + } + + err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url, data_headers, long_term) + VALUES ($1, $2, $3, TRUE) RETURNING ref_id`, pieceID, src.URL, headers).Scan(&refID) + if err != nil { + return xerrors.Errorf("inserting parked piece ref: %w", err) + } + refIds = append(refIds, refID) + } + + n, err := tx.Exec(`INSERT INTO market_mk20_download_pipeline (id, piece_cid_v2, product, ref_ids) VALUES ($1, $2, $3, $4)`, + dealID, deal.Data.PieceCID.String(), ProductNamePDPV1, refIds) + if err != nil { + return xerrors.Errorf("inserting PDP download pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("inserting PDP download pipeline: %d rows affected", n) + } + + n, err = tx.Exec(`INSERT INTO pdp_pipeline ( + id, client, piece_cid_v2, data_set_id, extra_data, deal_aggregation, indexing, announce, announce_payload) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)`, + dealID, deal.Client, data.PieceCID.String(), *pdp.DataSetID, + pdp.ExtraData, aggregation, retv.Indexing, retv.AnnouncePiece, retv.AnnouncePayload) + if err != nil { + return xerrors.Errorf("inserting PDP pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("inserting PDP pipeline: %d rows affected", n) + } + return nil + } + + // Insert pipeline when data source is aggregate + if deal.Data.SourceAggregate != nil { + + // Find all unique pieces where data source is HTTP + type downloadkey struct { + ID string + PieceCIDV2 cid.Cid + PieceCID cid.Cid + Size abi.PaddedPieceSize + RawSize uint64 + } + toDownload := make(map[downloadkey][]HttpUrl) + + for _, piece := range deal.Data.SourceAggregate.Pieces { + spi, err := GetPieceInfo(piece.PieceCID) + if err != nil { + return xerrors.Errorf("getting piece info: %w", err) + } + if piece.SourceHTTP != nil { + urls, ok := toDownload[downloadkey{ID: dealID, PieceCIDV2: piece.PieceCID, PieceCID: spi.PieceCIDV1, Size: spi.Size, RawSize: spi.RawSize}] + if ok { + toDownload[downloadkey{ID: dealID, PieceCIDV2: piece.PieceCID, PieceCID: spi.PieceCIDV1, Size: spi.Size}] = append(urls, piece.SourceHTTP.URLs...) + } else { + toDownload[downloadkey{ID: dealID, PieceCIDV2: piece.PieceCID, PieceCID: spi.PieceCIDV1, Size: spi.Size, RawSize: spi.RawSize}] = piece.SourceHTTP.URLs + } + } + } + + batch := &pgx.Batch{} + batchSize := 5000 + + for k, v := range toDownload { + for _, src := range v { + headers, err := json.Marshal(src.Headers) + if err != nil { + return xerrors.Errorf("marshal headers: %w", err) + } + batch.Queue(`WITH inserted_piece AS ( + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term) + VALUES ($1, $2, $3, FALSE) + ON CONFLICT (piece_cid, piece_padded_size, long_term, cleanup_task_id) DO NOTHING + RETURNING id + ), + selected_piece AS ( + SELECT COALESCE( + (SELECT id FROM inserted_piece), + (SELECT id FROM parked_pieces + WHERE piece_cid = $1 AND piece_padded_size = $2 AND long_term = FALSE AND cleanup_task_id IS NULL) + ) AS id + ), + inserted_ref AS ( + INSERT INTO parked_piece_refs (piece_id, data_url, data_headers, long_term) + SELECT id, $4, $5, FALSE FROM selected_piece + RETURNING ref_id + ) + INSERT INTO market_mk20_download_pipeline (id, piece_cid_v2, product, ref_ids) + VALUES ($6, $8, $7, ARRAY[(SELECT ref_id FROM inserted_ref)]) + ON CONFLICT (id, piece_cid_v2, product) DO UPDATE + SET ref_ids = array_append( + market_mk20_download_pipeline.ref_ids, + (SELECT ref_id FROM inserted_ref) + ) + WHERE NOT market_mk20_download_pipeline.ref_ids @> ARRAY[(SELECT ref_id FROM inserted_ref)];`, + k.PieceCID.String(), k.Size, k.RawSize, src.URL, headers, k.ID, ProductNamePDPV1, k.PieceCIDV2.String()) + } + + if batch.Len() > batchSize { + res := tx.SendBatch(ctx, batch) + if err := res.Close(); err != nil { + return xerrors.Errorf("closing parked piece query batch: %w", err) + } + batch = &pgx.Batch{} + } + } + + if batch.Len() > 0 { + res := tx.SendBatch(ctx, batch) + if err := res.Close(); err != nil { + return xerrors.Errorf("closing parked piece query batch: %w", err) + } + } + + pBatch := &pgx.Batch{} + pBatchSize := 4000 + for i, piece := range deal.Data.SourceAggregate.Pieces { + pBatch.Queue(`INSERT INTO pdp_pipeline ( + id, client, piece_cid_v2, data_set_id, extra_data, piece_ref, deal_aggregation, aggr_index, indexing, announce, announce_payload) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)`, + dealID, deal.Client, piece.PieceCID.String(), pdp.ExtraData, *pdp.DataSetID, + aggregation, i, retv.Indexing, retv.AnnouncePiece, retv.AnnouncePayload) + if pBatch.Len() > pBatchSize { + res := tx.SendBatch(ctx, pBatch) + if err := res.Close(); err != nil { + return xerrors.Errorf("closing mk20 pipeline insert batch: %w", err) + } + pBatch = &pgx.Batch{} + } + } + if pBatch.Len() > 0 { + res := tx.SendBatch(ctx, pBatch) + if err := res.Close(); err != nil { + return xerrors.Errorf("closing mk20 pipeline insert batch: %w", err) + } + } + return nil + } + + return xerrors.Errorf("unknown data source type") +} + +func markDownloaded(ctx context.Context, db *harmonydb.DB) { + md := func(ctx context.Context, db *harmonydb.DB) { + //var deals []struct { + // ID string `db:"id"` + // PieceCID string `db:"piece_cid_v2"` + //} + // + //err := db.Select(ctx, &deals, `SELECT id, piece_cid_v2 FROM pdp_pipeline WHERE piece_ref IS NULL`) + //if err != nil { + // log.Errorw("error getting PDP deals", "error", err) + //} + // + //for _, deal := range deals { + // _, err = db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + // pcid2, err := cid.Decode(deal.PieceCID) + // if err != nil { + // return false, xerrors.Errorf("decoding piece cid: %w", err) + // } + // + // pi, err := GetPieceInfo(pcid2) + // if err != nil { + // return false, xerrors.Errorf("getting piece info: %w", err) + // } + // + // var refid int64 + // err = tx.QueryRow(`SELECT u.ref_id FROM ( + // SELECT unnest(dp.ref_ids) AS ref_id + // FROM market_mk20_download_pipeline dp + // WHERE dp.id = $1 AND dp.piece_cid = $2 AND dp.piece_size = $3 AND dp.product = $4 + // ) u + // JOIN parked_piece_refs pr ON pr.ref_id = u.ref_id + // JOIN parked_pieces pp ON pp.id = pr.piece_id + // WHERE pp.complete = TRUE + // LIMIT 1;`, deal.ID, pi.PieceCIDV1.String(), pi.Size, ProductNamePDPV1).Scan(&refid) + // if err != nil { + // if errors.Is(err, pgx.ErrNoRows) { + // return false, nil + // } + // return false, xerrors.Errorf("failed to check if the piece is downloaded: %w", err) + // } + // + // // Remove other ref_ids from piece_park_refs + // _, err = tx.Exec(`DELETE FROM parked_piece_refs + // WHERE ref_id IN ( + // SELECT unnest(dp.ref_ids) + // FROM market_mk20_download_pipeline dp + // WHERE dp.id = $1 AND dp.piece_cid = $2 AND dp.piece_size = $3 AND dp.product = $4 + // ) + // AND ref_id != $5;`, deal.ID, pi.PieceCIDV1.String(), pi.Size, ProductNamePDPV1, refid) + // if err != nil { + // return false, xerrors.Errorf("failed to remove other ref_ids from piece_park_refs: %w", err) + // } + // + // _, err = tx.Exec(`DELETE FROM market_mk20_download_pipeline WHERE id = $1 AND piece_cid = $2 AND piece_size = $3 AND product = $4;`, + // deal.ID, pi.PieceCIDV1.String(), pi.Size, ProductNamePDPV1) + // if err != nil { + // return false, xerrors.Errorf("failed to delete piece from download table: %w", err) + // } + // + // _, err = tx.Exec(`UPDATE pdp_pipeline SET downloaded = TRUE, piece_ref = $1 + // WHERE id = $2 + // AND piece_cid_v2 = $3`, + // refid, deal.ID, deal.PieceCID) + // if err != nil { + // return false, xerrors.Errorf("failed to update download statos for PDP pipeline: %w", err) + // } + // return true, nil + // }, harmonydb.OptionRetry()) + // if err != nil { + // log.Errorw("error updating PDP deal", "deal", deal, "error", err) + // } + //} + n, err := db.Exec(ctx, `SELECT mk20_pdp_mark_downloaded($1)`, ProductNamePDPV1) + if err != nil { + log.Errorf("failed to mark PDP downloaded piece: %v", err) + return + } + log.Debugf("Succesfully marked %d PDP pieces as downloaded", n) + } + + ticker := time.NewTicker(time.Second * 2) + defer ticker.Stop() + for { + select { + case <-ticker.C: + md(ctx, db) + case <-ctx.Done(): + return + } + } +} + +// UpdateDeal updates the details of a deal specified by its ID and returns ProviderDealRejectionInfo which has ErrorCode and Reason +// @param id ulid.ULID +// @param deal *Deal +// @Return DealCode +// @Return Reason string + +func (m *MK20) UpdateDeal(id ulid.ULID, deal *Deal, auth string) *ProviderDealRejectionInfo { + if deal == nil { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrBadProposal, + Reason: "deal is undefined", + } + } + + ctx := context.Background() + + var exists bool + err := m.DB.QueryRow(ctx, `SELECT EXISTS ( + SELECT 1 + FROM market_mk20_deal + WHERE id = $1)`, id.String()).Scan(&exists) + if err != nil { + log.Errorw("failed to check if deal exists", "deal", id, "error", err) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + Reason: "", + } + } + + if !exists { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrDealNotFound, + Reason: "", + } + } + + code, nd, np, err := m.updateDealDetails(id, deal, auth) + if err != nil { + log.Errorw("failed to update deal details", "deal", id, "error", err) + if code == ErrServerInternalError { + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + Reason: "", + } + } else { + return &ProviderDealRejectionInfo{ + HTTPCode: code, + Reason: err.Error(), + } + } + } + + var rejection *ProviderDealRejectionInfo + + comm, err := m.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + // Save the updated deal to DB + err = nd.UpdateDeal(tx) + if err != nil { + return false, xerrors.Errorf("failed to update deal: %w", err) + } + + // Initiate new pipelines for DDO if required + for _, p := range np { + if p == ProductNameDDOV1 { + rejection = m.processDDODeal(ctx, nd, tx) + if rejection.HTTPCode != Ok { + return false, xerrors.Errorf("failed to process DDO deal") + } + } + } + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + log.Errorw("failed to update deal details", "deal", id, "error", err) + if rejection != nil { + return rejection + } + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + Reason: "", + } + } + + if !comm { + log.Errorw("failed to commit deal details", "deal", id, "error", err) + return &ProviderDealRejectionInfo{ + HTTPCode: ErrServerInternalError, + Reason: "", + } + } + + return &ProviderDealRejectionInfo{ + HTTPCode: Ok, + Reason: "", + } +} + +// To be used later for when data source is minerID +//func validateMinerAddresses(madrs []abi.Multiaddrs, pcid cid.Cid, psize abi.PaddedPieceSize, rawSize int64) bool { +// var surls []*url.URL +// for _, adr := range madrs { +// surl, err := maurl.ToURL(multiaddr.Cast(adr)) +// if err != nil { +// continue +// } +// surls = append(surls, surl) +// } +// +// var validUrls []*url.URL +// +// for _, surl := range surls { +// if surl.Scheme == "ws" { +// surl.Scheme = "http" +// } +// +// if surl.Scheme == "wss" { +// surl.Scheme = "https" +// } +// +// if surl.Port() == "443" { +// surl.Host = surl.Hostname() +// } +// +// if surl.Port() == "80" { +// surl.Host = surl.Hostname() +// } +// +// resp, err := http.Head(surl.String() + "/piece/" + pcid.String()) +// if err != nil { +// continue +// } +// if resp.StatusCode != 200 { +// continue +// } +// +// if resp.Header.Get("Content-Length") != fmt.Sprint(psize) { +// continue +// } +// +// validUrls = append(validUrls, surl) +// } +// return len(validUrls) > 0 +//} diff --git a/market/mk20/mk20_upload.go b/market/mk20/mk20_upload.go new file mode 100644 index 000000000..baf4bf027 --- /dev/null +++ b/market/mk20/mk20_upload.go @@ -0,0 +1,1332 @@ +package mk20 + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net/http" + "net/url" + "runtime" + "runtime/debug" + "time" + + "github.com/ipfs/go-cid" + "github.com/oklog/ulid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + commcid "github.com/filecoin-project/go-fil-commcid" + commp "github.com/filecoin-project/go-fil-commp-hashhash" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/storiface" +) + +// HandleUploadStatus retrieves and returns the upload status of a deal, including chunk completion details, or reports errors if the process fails. +// @param ID ulid.ULID +// @Return UploadStatusCode + +func (m *MK20) HandleUploadStatus(ctx context.Context, id ulid.ULID, w http.ResponseWriter) { + var exists bool + err := m.DB.QueryRow(ctx, `SELECT EXISTS ( + SELECT 1 + FROM market_mk20_upload_waiting + WHERE id = $1 + AND (chunked IS NULL OR chunked = TRUE) + );`, id.String()).Scan(&exists) + if err != nil { + log.Errorw("failed to check if upload is waiting for data", "deal", id, "error", err) + w.WriteHeader(int(UploadStatusCodeServerError)) + return + } + if !exists { + http.Error(w, "deal not found", int(UploadStatusCodeDealNotFound)) + return + } + + var ret UploadStatus + + err = m.DB.QueryRow(ctx, `SELECT + COUNT(*) AS total, + COUNT(*) FILTER (WHERE complete) AS complete, + COUNT(*) FILTER (WHERE NOT complete) AS missing, + ARRAY_AGG(chunk ORDER BY chunk) FILTER (WHERE complete) AS completed_chunks, + ARRAY_AGG(chunk ORDER BY chunk) FILTER (WHERE NOT complete) AS incomplete_chunks + FROM + market_mk20_deal_chunk + WHERE + id = $1 + GROUP BY + id;`, id.String()).Scan(&ret.TotalChunks, &ret.Uploaded, &ret.Missing, &ret.UploadedChunks, &ret.MissingChunks) + if err != nil { + if !errors.Is(err, pgx.ErrNoRows) { + log.Errorw("failed to get upload status", "deal", id, "error", err) + w.WriteHeader(int(UploadStatusCodeServerError)) + return + } + + http.Error(w, "upload not initiated", int(UploadStatusCodeUploadNotStarted)) + return + } + + data, err := json.Marshal(ret) + if err != nil { + log.Errorw("failed to marshal upload status", "deal", id, "error", err) + w.WriteHeader(int(UploadStatusCodeServerError)) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(int(UploadStatusCodeOk)) + + _, err = w.Write(data) + if err != nil { + log.Errorw("failed to write upload status", "deal", id, "error", err) + } +} + +// HandleUploadStart handles the initialization of a file upload process for a specific deal, validating input and creating database entries. +// @param ID ulid.ULID +// @param upload StartUpload +// @Return UploadStartCode + +func (m *MK20) HandleUploadStart(ctx context.Context, id ulid.ULID, upload StartUpload, w http.ResponseWriter) { + chunkSize := upload.ChunkSize + if upload.RawSize == 0 { + log.Errorw("raw size must be greater than 0", "id", id) + http.Error(w, "raw size must be greater than 0", int(UploadStartCodeBadRequest)) + return + } + + if chunkSize == 0 { + log.Errorw("chunk size must be greater than 0", "id", id) + http.Error(w, "chunk size must be greater than 0", int(UploadStartCodeBadRequest)) + return + } + + // Check if chunk size is a power of 2 + if chunkSize&(chunkSize-1) != 0 { + log.Errorw("chunk size must be a power of 2", "id", id) + http.Error(w, "chunk size must be a power of 2", int(UploadStartCodeBadRequest)) + return + } + + // Check that chunk size align with config + if chunkSize < m.cfg.Market.StorageMarketConfig.MK20.MinimumChunkSize { + log.Errorw("chunk size too small", "id", id) + http.Error(w, "chunk size too small", int(UploadStartCodeBadRequest)) + return + } + if chunkSize > m.cfg.Market.StorageMarketConfig.MK20.MaximumChunkSize { + log.Errorw("chunk size too large", "id", id) + http.Error(w, "chunk size too large", int(UploadStartCodeBadRequest)) + return + } + + // Check if deal exists + var exists bool + + err := m.DB.QueryRow(ctx, `SELECT EXISTS ( + SELECT 1 + FROM market_mk20_upload_waiting + WHERE id = $1 AND chunked IS NULL);`, id.String()).Scan(&exists) + if err != nil { + log.Errorw("failed to check if deal is waiting for upload to start", "deal", id, "error", err) + http.Error(w, "", int(UploadStartCodeServerError)) + return + } + if !exists { + http.Error(w, "deal not found", int(UploadStartCodeDealNotFound)) + return + } + + // Check if we already started the upload + var started bool + err = m.DB.QueryRow(ctx, `SELECT EXISTS ( + SELECT 1 + FROM market_mk20_deal_chunk + WHERE id = $1);`, id.String()).Scan(&started) + if err != nil { + log.Errorw("failed to check if deal upload has started", "deal", id, "error", err) + http.Error(w, "", int(UploadStartCodeServerError)) + return + } + + if started { + http.Error(w, "deal upload has already started", int(UploadStartCodeAlreadyStarted)) + return + } + + deal, err := DealFromDB(ctx, m.DB, id) + if err != nil { + log.Errorw("failed to get deal from db", "deal", id, "error", err) + http.Error(w, "", int(UploadStartCodeServerError)) + return + } + + var rawSize uint64 + + if deal.Data != nil { + rawSize, err = deal.RawSize() + if err != nil { + log.Errorw("failed to get raw size of deal", "deal", id, "error", err) + http.Error(w, "", int(UploadStartCodeServerError)) + return + } + if rawSize != upload.RawSize { + log.Errorw("raw size of deal does not match the one provided in deal", "deal", id, "error", err) + http.Error(w, "", int(UploadStartCodeBadRequest)) + } + } + + numChunks := int(math.Ceil(float64(rawSize) / float64(chunkSize))) + + // Create rows in market_mk20_deal_chunk for each chunk for the ID + comm, err := m.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + batch := &pgx.Batch{} + batchSize := 15000 + for i := 1; i <= numChunks; i++ { + if i < numChunks { + batch.Queue(`INSERT INTO market_mk20_deal_chunk (id, chunk, chunk_size, complete) VALUES ($1, $2, $3, FALSE);`, id.String(), i, chunkSize) + } else { + // Calculate the size of last chunk + s := int64(rawSize) - (int64(numChunks-1) * chunkSize) + if s <= 0 || s > chunkSize { + return false, xerrors.Errorf("invalid chunk size") + } + + batch.Queue(`INSERT INTO market_mk20_deal_chunk (id, chunk, chunk_size, complete) VALUES ($1, $2, $3, FALSE);`, id.String(), i, s) + } + if batch.Len() >= batchSize { + res := tx.SendBatch(ctx, batch) + if err := res.Close(); err != nil { + return false, xerrors.Errorf("closing insert chunk batch: %w", err) + } + batch = &pgx.Batch{} + } + } + if batch.Len() > 0 { + res := tx.SendBatch(ctx, batch) + if err := res.Close(); err != nil { + return false, xerrors.Errorf("closing insert chunk batch: %w", err) + } + } + n, err := tx.Exec(`UPDATE market_mk20_upload_waiting SET chunked = TRUE WHERE id = $1`, id.String()) + if err != nil { + return false, xerrors.Errorf("updating chunked flag: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updating chunked flag: expected 1 row updated, got %d", n) + } + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + log.Errorw("failed to create chunks for deal", "deal", id, "error", err) + http.Error(w, "", int(UploadStartCodeServerError)) + return + } + if !comm { + log.Errorw("failed to create chunks for deal", "deal", id, "error", err) + http.Error(w, "", int(UploadStartCodeServerError)) + return + } + w.WriteHeader(int(UploadStartCodeOk)) +} + +// HandleUploadChunk processes a single chunk upload for a deal and validates its state. +// It checks if the chunk exists, ensures it's not already uploaded, and stores it if valid. +// The function updates the database with chunk details and manages transaction rolls back on failure. +// @param id ulid.ULID +// @param chunk int +// @param data []byte +// @Return UploadCode + +func (m *MK20) HandleUploadChunk(id ulid.ULID, chunk int, data io.ReadCloser, w http.ResponseWriter) { + if m.maxParallelUploads.Load()+1 > int64(m.cfg.Market.StorageMarketConfig.MK20.MaxParallelChunkUploads) { + log.Errorw("max parallel uploads reached", "deal", id, "chunk", chunk, "error", "max parallel uploads reached") + http.Error(w, "too many parallel uploads for provider", int(UploadRateLimit)) + return + } + + ctx := context.Background() + defer func() { + _ = data.Close() + }() + + if chunk < 1 { + http.Error(w, "chunk must be greater than 0", int(UploadBadRequest)) + return + } + + var chunkDetails []struct { + Chunk int `db:"chunk"` + Size int64 `db:"chunk_size"` + Complete bool `db:"complete"` + RefID sql.NullInt64 `db:"ref_id"` + } + err := m.DB.Select(ctx, &chunkDetails, `SELECT chunk, chunk_size, ref_id, complete + FROM market_mk20_deal_chunk + WHERE id = $1 AND chunk = $2`, id.String(), chunk) + if err != nil { + log.Errorw("failed to check if chunk exists", "deal", id, "chunk", chunk, "error", err) + http.Error(w, "", int(UploadServerError)) + } + + if len(chunkDetails) == 0 { + http.Error(w, "chunk not found", int(UploadNotFound)) + return + } + + if len(chunkDetails) > 1 { + log.Errorw("chunk exists multiple times", "deal", id, "chunk", chunk, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + + if chunkDetails[0].Complete { + http.Error(w, "chunk already uploaded", int(UploadChunkAlreadyUploaded)) + return + } + + if chunkDetails[0].RefID.Valid { + http.Error(w, "chunk already uploaded", int(UploadChunkAlreadyUploaded)) + return + } + + log.Debugw("uploading chunk", "deal", id, "chunk", chunk) + + chunkSize := chunkDetails[0].Size + reader := NewTimeoutLimitReader(data, time.Second*5) + m.maxParallelUploads.Add(1) + defer func() { + m.maxParallelUploads.Add(-1) + }() + + // Generate unique tmp pieceCID and Size for parked_pieces tables + wr := new(commp.Calc) + n, err := fmt.Fprintf(wr, "%s, %d, %d, %s", id.String(), chunk, chunkSize, time.Now().String()) + if err != nil { + log.Errorw("failed to generate unique tmp pieceCID and Size for parked_pieces tables", "deal", id, "chunk", chunk, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + digest, tsize, err := wr.Digest() + if err != nil { + panic(err) + } + + tpcid := cid.NewCidV1(cid.FilCommitmentUnsealed, digest) + var pnum, refID int64 + + // Generate piece park details with tmp pieceCID and Size + comm, err := m.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + err = tx.QueryRow(`SELECT id FROM parked_pieces + WHERE piece_cid = $1 + AND piece_padded_size = $2 + AND piece_raw_size = $3`, tpcid.String(), tsize, n).Scan(&pnum) + + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + err = tx.QueryRow(` + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term, skip) + VALUES ($1, $2, $3, FALSE, TRUE) + ON CONFLICT (piece_cid, piece_padded_size, long_term, cleanup_task_id) DO NOTHING + RETURNING id`, tpcid.String(), tsize, n).Scan(&pnum) + if err != nil { + return false, xerrors.Errorf("inserting new parked piece and getting id: %w", err) + } + } else { + return false, xerrors.Errorf("checking existing parked piece: %w", err) + } + } + + // Add parked_piece_ref + err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url, long_term) + VALUES ($1, $2, FALSE) RETURNING ref_id`, pnum, "/PUT").Scan(&refID) + if err != nil { + return false, xerrors.Errorf("inserting parked piece ref: %w", err) + } + + return true, nil + }) + + if err != nil { + log.Errorw("failed to update chunk", "deal", id, "chunk", chunk, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + + if !comm { + log.Errorw("failed to update chunk", "deal", id, "chunk", chunk, "error", "failed to commit transaction") + http.Error(w, "", int(UploadServerError)) + return + } + + log.Debugw("tmp piece details generated for the chunk", "deal", id, "chunk", chunk) + + failed := true + defer func() { + if failed { + _, err = m.DB.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = $1`, refID) + if err != nil { + log.Errorw("failed to delete parked piece ref", "deal", id, "chunk", chunk, "error", err) + } + } + }() + + // Store the piece and generate PieceCID and Size + pi, _, err := m.sc.WriteUploadPiece(ctx, storiface.PieceNumber(pnum), chunkSize, reader, storiface.PathSealing, true) + if err != nil { + log.Errorw("failed to write piece", "deal", id, "chunk", chunk, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + + log.Debugw("piece stored", "deal", id, "chunk", chunk) + + // Update piece park details with correct values + comm, err = m.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE parked_pieces SET + piece_cid = $1, + piece_padded_size = $2, + piece_raw_size = $3, + complete = true + WHERE id = $4`, + pi.PieceCID.String(), pi.Size, chunkSize, pnum) + if err != nil { + return false, xerrors.Errorf("updating parked piece: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updating parked piece: expected 1 row updated, got %d", n) + } + + n, err = tx.Exec(`UPDATE market_mk20_deal_chunk SET + complete = TRUE, + completed_at = NOW() AT TIME ZONE 'UTC', + ref_id = $1 + WHERE id = $2 + AND chunk = $3 + AND complete = FALSE + AND ref_id IS NULL`, refID, id.String(), chunk) + if err != nil { + return false, xerrors.Errorf("updating chunk url: %w", err) + } + + return n == 1, nil + }) + + if err != nil { + log.Errorw("failed to update chunk", "deal", id, "chunk", chunk, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + + if !comm { + log.Errorw("failed to update chunk", "deal", id, "chunk", chunk, "error", "failed to commit transaction") + http.Error(w, "", int(UploadServerError)) + return + } + + log.Debugw("chunk upload finished", "deal", id, "chunk", chunk) + + failed = false + w.WriteHeader(int(UploadOk)) +} + +// HandleUploadFinalize completes the upload process for a deal by verifying its chunks, updating the deal, and marking the upload as finalized. +// @param id ulid.ULID +// @param deal *Deal [optional] +// @Return DealCode + +func (m *MK20) HandleUploadFinalize(id ulid.ULID, deal *Deal, w http.ResponseWriter, auth string) { + ctx := context.Background() + var exists bool + err := m.DB.QueryRow(ctx, `SELECT EXISTS ( + SELECT 1 + FROM market_mk20_deal_chunk + WHERE id = $1 AND complete = FALSE OR complete IS NULL + )`, id.String()).Scan(&exists) + if err != nil { + log.Errorw("failed to check if deal upload has started", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + + if exists { + http.Error(w, "deal upload has not finished", http.StatusBadRequest) + return + } + + ddeal, err := DealFromDB(ctx, m.DB, id) + if err != nil { + log.Errorw("failed to get deal from db", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + + if ddeal.Data == nil && deal == nil { + log.Errorw("cannot finalize deal with missing data source", "deal", id) + http.Error(w, "cannot finalize deal with missing data source", int(ErrBadProposal)) + return + } + + var rawSize uint64 + var newDeal *Deal + var dealUpdated bool + + if deal != nil { + // This is a deal where DataSource was not set - we should update the deal + code, ndeal, _, err := m.updateDealDetails(id, deal, auth) + if err != nil { + log.Errorw("failed to update deal details", "deal", id, "error", err) + if code == ErrServerInternalError { + http.Error(w, "", int(ErrServerInternalError)) + } else { + http.Error(w, err.Error(), int(code)) + } + return + } + rawSize, err = ndeal.RawSize() + if err != nil { + log.Errorw("failed to get raw size of deal", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + newDeal = ndeal + dealUpdated = true + } else { + rawSize, err = ddeal.RawSize() + if err != nil { + log.Errorw("failed to get raw size of deal", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + newDeal = ddeal + } + + var valid bool + + err = m.DB.QueryRow(ctx, `SELECT SUM(chunk_size) = $2 AS valid + FROM market_mk20_deal_chunk + WHERE id = $1;`, id.String(), rawSize).Scan(&valid) + if err != nil { + log.Errorw("failed to check if deal upload has started", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + if !valid { + log.Errorw("deal upload finalize failed", "deal", id, "error", "deal raw size does not match the sum of chunks") + http.Error(w, "deal raw size does not match the sum of chunks", int(ErrBadProposal)) + return + } + + if newDeal.Products.DDOV1 != nil { + rej, err := m.sanitizeDDODeal(ctx, newDeal) + if err != nil { + log.Errorw("failed to sanitize DDO deal", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + if rej != nil { + if rej.HTTPCode == 500 { + log.Errorw("failed to sanitize DDO deal", "deal", id, "error", rej.Reason) + http.Error(w, "", int(ErrServerInternalError)) + return + } + if rej.HTTPCode != 500 { + log.Errorw("failed to sanitize DDO deal", "deal", id, "error", rej.Reason) + http.Error(w, rej.Reason, int(rej.HTTPCode)) + return + } + } + } + + if newDeal.Products.PDPV1 != nil { + rej, err := m.sanitizePDPDeal(ctx, newDeal) + if err != nil { + log.Errorw("failed to sanitize PDP deal", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + if rej != nil { + if rej.HTTPCode == 500 { + log.Errorw("failed to sanitize PDP deal", "deal", id, "error", rej.Reason) + http.Error(w, "", int(ErrServerInternalError)) + return + } + if rej.HTTPCode != 500 { + log.Errorw("failed to sanitize PDP deal", "deal", id, "error", rej.Reason) + http.Error(w, rej.Reason, int(rej.HTTPCode)) + return + } + } + } + + comm, err := m.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + // Now update the upload status to trigger the correct pipeline + n, err := tx.Exec(`UPDATE market_mk20_deal_chunk SET finalize = TRUE where id = $1`, id.String()) + if err != nil { + log.Errorw("failed to finalize deal upload", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + + if n == 0 { + return false, xerrors.Errorf("expected to update %d rows, got 0", n) + } + + _, err = tx.Exec(`DELETE FROM market_mk20_upload_waiting WHERE id = $1`, id.String()) + if err != nil { + return false, xerrors.Errorf("failed to delete upload waiting: %w", err) + } + + if dealUpdated { + // Save the updated deal to DB + err = newDeal.UpdateDeal(tx) + if err != nil { + return false, xerrors.Errorf("failed to update deal: %w", err) + } + } + return true, nil + }) + + if err != nil { + log.Errorw("failed to finalize deal upload", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + + if !comm { + log.Errorw("failed to finalize deal upload", "deal", id, "error", "failed to commit transaction") + http.Error(w, "", int(ErrServerInternalError)) + return + } + + w.WriteHeader(int(Ok)) +} + +func (m *MK20) updateDealDetails(id ulid.ULID, deal *Deal, auth string) (DealCode, *Deal, []ProductName, error) { + ctx := context.Background() // Let's not use request context to avoid DB inconsistencies + + if deal.Identifier.Compare(id) != 0 { + return ErrBadProposal, nil, nil, xerrors.Errorf("deal ID and proposal ID do not match") + } + + if deal.Data == nil { + return ErrBadProposal, nil, nil, xerrors.Errorf("deal data is nil") + } + + // Validate the deal + code, err := deal.Validate(m.DB, &m.cfg.Market.StorageMarketConfig.MK20, auth) + if err != nil { + return code, nil, nil, err + } + + log.Debugw("deal validated", "deal", deal.Identifier.String()) + + // Verify we have a deal is DB + var exists bool + err = m.DB.QueryRow(ctx, `SELECT EXISTS (SELECT 1 FROM market_mk20_deal WHERE id = $1)`, id.String()).Scan(&exists) + if err != nil { + return ErrServerInternalError, nil, nil, xerrors.Errorf("failed to check if deal exists: %w", err) + } + + if !exists { + return ErrDealNotFound, nil, nil, xerrors.Errorf("deal not found") + } + + // Get updated deal + ndeal, code, np, err := UpdateDealDetails(ctx, m.DB, id, deal, &m.cfg.Market.StorageMarketConfig.MK20, auth) + if err != nil { + return code, nil, nil, err + } + + return Ok, ndeal, np, nil +} + +func (m *MK20) HandleSerialUpload(id ulid.ULID, body io.Reader, w http.ResponseWriter) { + if m.maxParallelUploads.Load()+1 > int64(m.cfg.Market.StorageMarketConfig.MK20.MaxParallelChunkUploads) { + log.Errorw("max parallel uploads reached", "deal", id, "error", "max parallel uploads reached") + http.Error(w, "too many parallel uploads for provider", int(UploadRateLimit)) + return + } + + ctx := context.Background() + var exists bool + err := m.DB.QueryRow(ctx, `SELECT EXISTS ( + SELECT 1 + FROM market_mk20_upload_waiting + WHERE id = $1 AND chunked IS NULL);`, id.String()).Scan(&exists) + if err != nil { + log.Errorw("failed to check if upload is waiting for data", "deal", id, "error", err) + w.WriteHeader(int(UploadServerError)) + return + } + if !exists { + http.Error(w, "deal not found", int(UploadStartCodeDealNotFound)) + return + } + + reader := NewTimeoutLimitReader(body, time.Second*5) + m.maxParallelUploads.Add(1) + defer func() { + m.maxParallelUploads.Add(-1) + }() + + // Generate unique tmp pieceCID and Size for parked_pieces tables + wr := new(commp.Calc) + trs, err := fmt.Fprintf(wr, "%s, %s", id.String(), time.Now().String()) + if err != nil { + log.Errorw("failed to generate unique tmp pieceCID and Size for parked_pieces tables", "deal", id, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + digest, tsize, err := wr.Digest() + if err != nil { + panic(err) + } + + trSize := uint64(trs) + + tpcid, err := commcid.DataCommitmentV1ToCID(digest) + if err != nil { + log.Errorw("failed to generate tmp pieceCID", "deal", id, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + + deal, err := DealFromDB(ctx, m.DB, id) + if err != nil { + log.Errorw("failed to get deal from db", "deal", id, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + + var havePInfo bool + var pinfo *PieceInfo + + if deal.Data != nil { + pi, err := deal.PieceInfo() + if err != nil { + log.Errorw("failed to get piece info from deal", "deal", id, "error", err) + http.Error(w, "", int(UploadServerError)) + } + + tpcid = pi.PieceCIDV1 + tsize = uint64(pi.Size) + trSize = pi.RawSize + havePInfo = true + pinfo = pi + } + + var pnum, refID int64 + pieceExists := true + + // Generate piece park details with tmp pieceCID and Size + comm, err := m.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + err = tx.QueryRow(`SELECT id FROM parked_pieces + WHERE piece_cid = $1 + AND piece_padded_size = $2 + AND piece_raw_size = $3 + AND complete = true`, tpcid.String(), tsize, trSize).Scan(&pnum) + + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + err = tx.QueryRow(` + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term, skip) + VALUES ($1, $2, $3, TRUE, TRUE) + ON CONFLICT (piece_cid, piece_padded_size, long_term, cleanup_task_id) DO NOTHING + RETURNING id`, tpcid.String(), tsize, trSize).Scan(&pnum) + if err != nil { + return false, xerrors.Errorf("inserting new parked piece and getting id: %w", err) + } + pieceExists = false + } else { + return false, xerrors.Errorf("checking existing parked piece: %w", err) + } + } + + // Add parked_piece_ref + err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url, long_term) + VALUES ($1, $2, TRUE) RETURNING ref_id`, pnum, "/PUT").Scan(&refID) + if err != nil { + return false, xerrors.Errorf("inserting parked piece ref: %w", err) + } + + // Mark upload as started to prevent someone else from using chunk upload + n, err := tx.Exec(`UPDATE market_mk20_upload_waiting SET chunked = FALSE WHERE id = $1`, id.String()) + if err != nil { + return false, xerrors.Errorf("updating upload waiting: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updating upload waiting: expected 1 row updated, got %d", n) + } + + return true, nil + }) + + if err != nil { + log.Errorw("failed to update chunk", "deal", id, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + + if !comm { + log.Errorw("failed to update chunk", "deal", id, "error", "failed to commit transaction") + http.Error(w, "", int(UploadServerError)) + return + } + + // If we know the piece details and already have it then let's return early + if pieceExists && havePInfo { + w.WriteHeader(int(UploadOk)) + } + + if !havePInfo { + log.Debugw("tmp piece details generated for the chunk", "deal", id) + } + + failed := true + defer func() { + if failed { + _, serr := m.DB.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = $1`, refID) + if serr != nil { + log.Errorw("failed to delete parked piece ref", "deal", id, "error", serr) + } + + _, serr = m.DB.Exec(ctx, `UPDATE market_mk20_upload_waiting SET chunked = NULL WHERE id = $1`, id.String()) + if serr != nil { + log.Errorw("failed to update upload waiting", "deal", id, "error", serr) + } + } + }() + + // Store the piece and generate PieceCID and Size + pi, rawSize, err := m.sc.WriteUploadPiece(ctx, storiface.PieceNumber(pnum), UploadSizeLimit, reader, storiface.PathSealing, false) + if err != nil { + log.Errorw("failed to write piece", "deal", id, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + + if havePInfo { + if rawSize != pinfo.RawSize { + log.Errorw("piece raw size does not match", "deal", id, "supplied", pinfo.RawSize, "written", rawSize, "error", "piece raw size does not match") + http.Error(w, "piece raw size does not match", int(UploadBadRequest)) + return + } + + if !pi.PieceCID.Equals(pinfo.PieceCIDV1) { + log.Errorw("piece CID does not match", "deal", id, "error", "piece CID does not match") + http.Error(w, "piece CID does not match", int(UploadBadRequest)) + return + } + if pi.Size != pinfo.Size { + log.Errorw("piece size does not match", "deal", id, "error", "piece size does not match") + http.Error(w, "piece size does not match", int(UploadBadRequest)) + return + } + } + + log.Debugw("piece stored", "deal", id) + + // Update piece park details with correct values + comm, err = m.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + if havePInfo { + n, err := tx.Exec(`UPDATE parked_pieces SET complete = true WHERE id = $1`, pnum) + if err != nil { + return false, xerrors.Errorf("updating parked piece: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updating parked piece: expected 1 row updated, got %d", n) + } + } else { + var pid int64 + var complete bool + // Check if we already have the piece, if found then verify access and skip rest of the processing + err = tx.QueryRow(`SELECT id, complete FROM parked_pieces WHERE piece_cid = $1 AND piece_padded_size = $2 AND long_term = TRUE`, pi.PieceCID.String(), pi.Size).Scan(&pid, &complete) + if err == nil { + if complete { + // If piece exists then check if we can access the data + pr, err := m.sc.PieceReader(ctx, storiface.PieceNumber(pid)) + if err != nil { + // We should fail here because any subsequent operation which requires access to data will also fail + // till this error is fixed + if !errors.Is(err, storiface.ErrSectorNotFound) { + return false, fmt.Errorf("failed to get piece reader: %w", err) + } + + // If piece does not exist then we update piece park table to work with new tmpID + // Update ref table's reference to tmp id + _, err = tx.Exec(`UPDATE parked_piece_refs SET piece_id = $1 WHERE piece_id = $2`, pnum, pid) + if err != nil { + return false, xerrors.Errorf("updating parked piece ref: %w", err) + } + + // Now delete the original piece which has 404 error + _, err = tx.Exec(`DELETE FROM parked_pieces WHERE id = $1`, pid) + if err != nil { + return false, xerrors.Errorf("deleting parked piece: %w", err) + } + + // Update the tmp entry with correct details + n, err := tx.Exec(`UPDATE parked_pieces SET + piece_cid = $1, + piece_padded_size = $2, + piece_raw_size = $3, + complete = true + WHERE id = $4`, + pi.PieceCID.String(), pi.Size, rawSize, pnum) + if err != nil { + return false, xerrors.Errorf("updating parked piece: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updating parked piece: expected 1 row updated, got %d", n) + } + } else { + defer func() { + _ = pr.Close() + }() + // Add parked_piece_ref if no errors + var newRefID int64 + err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url, long_term) + VALUES ($1, $2, FALSE) RETURNING ref_id`, pid, "/PUT").Scan(&newRefID) + if err != nil { + return false, xerrors.Errorf("inserting parked piece ref: %w", err) + } + + // Remove the tmp refs. This will also delete the new tmp parked_pieces entry + _, err = tx.Exec(`DELETE FROM parked_piece_refs WHERE ref_id = $1`, refID) + if err != nil { + return false, xerrors.Errorf("deleting tmp parked piece ref: %w", err) + } + // Update refID to be used later + refID = newRefID + } + } else { + n, err := tx.Exec(`UPDATE parked_pieces SET complete = true WHERE id = $1`, pid) + if err != nil { + return false, xerrors.Errorf("updating parked piece: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updating parked piece: expected 1 row updated, got %d", n) + } + } + } else { + if !errors.Is(err, pgx.ErrNoRows) { + return false, fmt.Errorf("failed to check if piece already exists: %w", err) + } + // If piece does not exist then let's update the tmp one + n, err := tx.Exec(`UPDATE parked_pieces SET + piece_cid = $1, + piece_padded_size = $2, + piece_raw_size = $3, + complete = true + WHERE id = $4`, + pi.PieceCID.String(), pi.Size, rawSize, pnum) + if err != nil { + return false, xerrors.Errorf("updating parked piece: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updating parked piece: expected 1 row updated, got %d", n) + } + } + } + + n, err := tx.Exec(`UPDATE market_mk20_upload_waiting SET chunked = FALSE, ref_id = $2 WHERE id = $1`, id.String(), refID) + if err != nil { + return false, xerrors.Errorf("updating upload waiting: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updating upload waiting: expected 1 row updated, got %d", n) + } + return true, nil + }) + + if err != nil { + log.Errorw("failed to update chunk", "deal", id, "error", err) + http.Error(w, "", int(UploadServerError)) + return + } + + if !comm { + log.Errorw("failed to update chunk", "deal", id, "error", "failed to commit transaction") + http.Error(w, "", int(UploadServerError)) + return + } + + log.Debugw("chunk upload finished", "deal", id) + + failed = false + w.WriteHeader(int(UploadOk)) +} + +func (m *MK20) HandleSerialUploadFinalize(id ulid.ULID, deal *Deal, w http.ResponseWriter, auth string) { + defer func() { + if r := recover(); r != nil { + trace := make([]byte, 1<<16) + n := runtime.Stack(trace, false) + log.Errorf("panic occurred: %v\n%s", r, trace[:n]) + debug.PrintStack() + } + }() + + ctx := context.Background() + var exists bool + err := m.DB.QueryRow(ctx, `SELECT EXISTS ( + SELECT 1 + FROM market_mk20_upload_waiting + WHERE id = $1 AND chunked = FALSE AND ref_id IS NOT NULL);`, id.String()).Scan(&exists) + if err != nil { + log.Errorw("failed to check if upload is waiting for data", "deal", id, "error", err) + w.WriteHeader(int(ErrServerInternalError)) + return + } + + if !exists { + http.Error(w, "deal not found", int(ErrDealNotFound)) + return + } + + ddeal, err := DealFromDB(ctx, m.DB, id) + if err != nil { + log.Errorw("failed to get deal from db", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + + if ddeal.Data == nil && deal == nil { + log.Errorw("cannot finalize deal with missing data source", "deal", id) + http.Error(w, "cannot finalize deal with missing data source", int(ErrBadProposal)) + return + } + + var pcidStr string + var rawSize, refID, pid, pieceSize int64 + + err = m.DB.QueryRow(ctx, `SELECT r.ref_id, p.piece_cid, p.piece_padded_size, p.piece_raw_size, p.id + FROM market_mk20_upload_waiting u + JOIN parked_piece_refs r ON u.ref_id = r.ref_id + JOIN parked_pieces p ON r.piece_id = p.id + WHERE u.id = $1 + AND p.complete = TRUE + AND p.long_term = TRUE;`, id.String()).Scan(&refID, &pcidStr, &pieceSize, &rawSize, &pid) + if err != nil { + log.Errorw("failed to get piece details", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + pcid, err := cid.Parse(pcidStr) + if err != nil { + log.Errorw("failed to parse piece cid", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + } + + var uDeal *Deal + var dealUpdated bool + + if deal != nil { + // This is a deal where DataSource was not set - we should update the deal + code, ndeal, _, err := m.updateDealDetails(id, deal, auth) + if err != nil { + log.Errorw("failed to update deal details", "deal", id, "error", err) + if code == ErrServerInternalError { + http.Error(w, "", int(ErrServerInternalError)) + } else { + http.Error(w, err.Error(), int(code)) + } + return + } + uDeal = ndeal + dealUpdated = true + } else { + uDeal = ddeal + } + + pi, err := uDeal.PieceInfo() + if err != nil { + log.Errorw("failed to get piece info", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + + if !pi.PieceCIDV1.Equals(pcid) { + log.Errorw("piece cid mismatch", "deal", id, "expected", pcid, "actual", pi.PieceCIDV1) + http.Error(w, "piece cid mismatch", int(ErrBadProposal)) + return + } + + if pi.Size != abi.PaddedPieceSize(pieceSize) { + log.Errorw("piece size mismatch", "deal", id, "expected", pi.Size, "actual", pieceSize) + http.Error(w, "piece size mismatch", int(ErrBadProposal)) + return + } + + if pi.RawSize != uint64(rawSize) { + log.Errorw("piece raw size mismatch", "deal", id, "expected", pi.RawSize, "actual", rawSize) + http.Error(w, "piece raw size mismatch", int(ErrBadProposal)) + return + } + + if uDeal.Products.DDOV1 != nil { + rej, err := m.sanitizeDDODeal(ctx, uDeal) + if err != nil { + log.Errorw("failed to sanitize DDO deal", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + if rej != nil { + if rej.HTTPCode == 500 { + log.Errorw("failed to sanitize DDO deal", "deal", id, "error", rej.Reason) + http.Error(w, "", int(ErrServerInternalError)) + return + } + if rej.HTTPCode != 500 { + log.Errorw("failed to sanitize DDO deal", "deal", id, "error", rej.Reason) + http.Error(w, rej.Reason, int(rej.HTTPCode)) + return + } + } + } + + if uDeal.Products.PDPV1 != nil { + rej, err := m.sanitizePDPDeal(ctx, uDeal) + if err != nil { + log.Errorw("failed to sanitize PDP deal", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + if rej != nil { + if rej.HTTPCode == 500 { + log.Errorw("failed to sanitize PDP deal", "deal", id, "error", rej.Reason) + http.Error(w, "", int(ErrServerInternalError)) + return + } + if rej.HTTPCode != 500 { + log.Errorw("failed to sanitize PDP deal", "deal", id, "error", rej.Reason) + http.Error(w, rej.Reason, int(rej.HTTPCode)) + return + } + } + } + + comm, err := m.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + _, err = tx.Exec(`DELETE FROM market_mk20_upload_waiting WHERE id = $1`, id.String()) + if err != nil { + return false, xerrors.Errorf("failed to delete upload waiting: %w", err) + } + + if dealUpdated { + // Save the updated deal to DB + err = uDeal.UpdateDeal(tx) + if err != nil { + return false, xerrors.Errorf("failed to update deal: %w", err) + } + } + + retv := uDeal.Products.RetrievalV1 + data := uDeal.Data + + aggregation := 0 + if data.Format.Aggregate != nil { + aggregation = int(data.Format.Aggregate.Type) + } + + var refUsed bool + + if uDeal.Products.DDOV1 != nil { + ddo := uDeal.Products.DDOV1 + spid, err := address.IDFromAddress(ddo.Provider) + if err != nil { + return false, fmt.Errorf("getting provider ID: %w", err) + } + + pieceIDUrl := url.URL{ + Scheme: "pieceref", + Opaque: fmt.Sprintf("%d", refID), + } + + var allocationID interface{} + if ddo.AllocationId != nil { + allocationID = *ddo.AllocationId + } else { + allocationID = nil + } + + n, err := tx.Exec(`INSERT INTO market_mk20_pipeline ( + id, sp_id, contract, client, piece_cid_v2, piece_cid, + piece_size, raw_size, url, offline, indexing, announce, + allocation_id, duration, piece_aggregation, deal_aggregation, started, downloaded, after_commp) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, TRUE, TRUE, TRUE)`, + id.String(), spid, ddo.ContractAddress, uDeal.Client, data.PieceCID.String(), pi.PieceCIDV1.String(), + pi.Size, pi.RawSize, pieceIDUrl.String(), false, retv.Indexing, retv.AnnouncePayload, + allocationID, ddo.Duration, aggregation, aggregation) + + if err != nil { + return false, xerrors.Errorf("inserting mk20 pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("inserting mk20 pipeline: %d rows affected", n) + } + + log.Debugw("mk20 pipeline created", "deal", id) + + refUsed = true + } + + if uDeal.Products.PDPV1 != nil { + pdp := uDeal.Products.PDPV1 + // Insert the PDP pipeline + if refUsed { + err = tx.QueryRow(` + INSERT INTO parked_piece_refs (piece_id, data_url, long_term) + VALUES ($1, $2, TRUE) RETURNING ref_id + `, pid, "/PUT").Scan(&refID) + if err != nil { + return false, fmt.Errorf("failed to create parked_piece_refs entry: %w", err) + } + } + + n, err := tx.Exec(`INSERT INTO pdp_pipeline ( + id, client, piece_cid_v2, data_set_id, + extra_data, piece_ref, downloaded, deal_aggregation, aggr_index, indexing, announce, announce_payload, after_commp) + VALUES ($1, $2, $3, $4, $5, $6, TRUE, $7, 0, $8, $9, $10, TRUE)`, + id.String(), uDeal.Client, uDeal.Data.PieceCID.String(), *pdp.DataSetID, + pdp.ExtraData, refID, aggregation, retv.Indexing, retv.AnnouncePiece, retv.AnnouncePayload) + if err != nil { + return false, xerrors.Errorf("inserting in PDP pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("inserting in PDP pipeline: %d rows affected", n) + } + log.Debugw("PDP pipeline created", "deal", id) + } + + return true, nil + }) + + if err != nil { + log.Errorw("failed to finalize deal upload", "deal", id, "error", err) + http.Error(w, "", int(ErrServerInternalError)) + return + } + + if !comm { + log.Errorw("failed to finalize deal upload", "deal", id, "error", "failed to commit transaction") + http.Error(w, "", int(ErrServerInternalError)) + return + } + + w.WriteHeader(int(Ok)) +} + +func removeNotFinalizedUploads(ctx context.Context, db *harmonydb.DB) { + rm := func(ctx context.Context, db *harmonydb.DB) { + var deals []struct { + ID string `db:"id"` + Chunked bool `db:"chunked"` + RefID sql.NullInt64 `db:"ref_id"` + ReadyAt time.Time `db:"ready_at"` + } + + err := db.Select(ctx, &deals, `SELECT id, chunked, ref_id, ready_at + FROM market_mk20_upload_waiting + WHERE chunked IS NOT NULL + AND ready_at <= NOW() AT TIME ZONE 'UTC' - INTERVAL '60 minutes';`) + if err != nil { + log.Errorw("failed to get not finalized uploads", "error", err) + } + + for _, deal := range deals { + if deal.Chunked { + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + _, err = tx.Exec(`DELETE FROM parked_piece_refs p + USING ( + SELECT DISTINCT ref_id + FROM market_mk20_deal_chunk + WHERE id = $1 AND ref_id IS NOT NULL + ) c + WHERE p.ref_id = c.ref_id; + `, deal.ID) + if err != nil { + return false, xerrors.Errorf("deleting piece refs: %w", err) + } + + _, err = tx.Exec(`DELETE FROM market_mk20_deal_chunk WHERE id = $1`, deal.ID) + if err != nil { + return false, xerrors.Errorf("deleting deal chunks: %w", err) + } + + n, err := tx.Exec(`UPDATE market_mk20_upload_waiting + SET chunked = NULL, + ref_id = NULL, + ready_at = NULL + WHERE id = $1;`, deal.ID) + + if err != nil { + return false, xerrors.Errorf("updating upload waiting: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updating upload waiting: expected 1 row updated, got %d", n) + } + + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + log.Errorw("failed to delete not finalized uploads", "deal", deal.ID, "error", err) + } + if !comm { + log.Errorw("failed to delete not finalized uploads", "deal", deal.ID, "error", "failed to commit transaction") + } + } else { + if deal.RefID.Valid { + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + _, err = tx.Exec(`DELETE FROM parked_piece_refs WHERE ref_id = $1`, deal.RefID.Int64) + if err != nil { + return false, xerrors.Errorf("deleting piece refs: %w", err) + } + + n, err := tx.Exec(`UPDATE market_mk20_upload_waiting + SET chunked = NULL, + ref_id = NULL, + ready_at = NULL + WHERE id = $1;`, deal.ID) + + if err != nil { + return false, xerrors.Errorf("updating upload waiting: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updating upload waiting: expected 1 row updated, got %d", n) + } + + return true, nil + }) + if err != nil { + log.Errorw("failed to delete not finalized uploads", "deal", deal.ID, "error", err) + } + if !comm { + log.Errorw("failed to delete not finalized uploads", "deal", deal.ID, "error", "failed to commit transaction") + } + } + log.Errorw("removing not finalized upload", "deal", deal.ID, "error", "ref_id not found") + } + } + } + + ticker := time.NewTicker(time.Minute * 5) + defer ticker.Stop() + for { + select { + case <-ticker.C: + rm(ctx, db) + case <-ctx.Done(): + return + } + } +} diff --git a/market/mk20/mk20_utils.go b/market/mk20/mk20_utils.go new file mode 100644 index 000000000..bc708e229 --- /dev/null +++ b/market/mk20/mk20_utils.go @@ -0,0 +1,335 @@ +package mk20 + +import ( + "context" + "database/sql" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/oklog/ulid" + "github.com/yugabyte/pgx/v5" +) + +// DealStatus retrieves the status of a specific deal by querying the database and determining the current state for both PDP and DDO processing. +// @param id [ulid.ULID] +// @Return http.StatusNotFound +// @Return http.StatusInternalServerError +// @Return *DealProductStatusResponse + +func (m *MK20) DealStatus(ctx context.Context, id ulid.ULID) *DealStatus { + var pdp_complete, ddo_complete sql.NullBool + var pdp_error, ddo_error sql.NullString + + err := m.DB.QueryRow(ctx, `SELECT + (pdp_v1->>'complete')::boolean AS pdp_complete, + (pdp_v1->>'error')::text AS pdp_error, + (ddo_v1->>'complete')::boolean AS ddo_complete, + (ddo_v1->>'error')::text AS ddo_error + FROM market_mk20_deal + WHERE id = $1;`, id.String()).Scan(&pdp_complete, &pdp_error, &ddo_complete, &ddo_error) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return &DealStatus{ + HTTPCode: http.StatusNotFound, + } + } + log.Errorw("failed to query the db for deal status", "deal", id.String(), "err", err) + return &DealStatus{ + HTTPCode: http.StatusInternalServerError, + } + } + + deal, err := DealFromDB(ctx, m.DB, id) + if err != nil { + log.Errorw("failed to get deal from db", "deal", id, "error", err) + return &DealStatus{ + HTTPCode: http.StatusInternalServerError, + } + } + + isPDP := deal.Products.PDPV1 != nil + isDDO := deal.Products.DDOV1 != nil + + // If only PDP is defined + if isPDP && !isDDO { + ret := &DealStatus{ + HTTPCode: http.StatusOK, + Response: &DealProductStatusResponse{ + PDPV1: &DealStatusResponse{ + State: DealStateAccepted, + }, + }, + } + if pdp_complete.Bool { + ret.Response.PDPV1.State = DealStateComplete + } + if pdp_error.Valid && pdp_error.String != "" { + ret.Response.PDPV1.State = DealStateFailed + ret.Response.PDPV1.ErrorMsg = pdp_error.String + } + + if !pdp_complete.Bool { + pdp := deal.Products.PDPV1 + if pdp.AddPiece { + if deal.Data != nil { + // Check if deal is uploaded + var yes bool + err = m.DB.QueryRow(ctx, `SELECT EXISTS (SELECT 1 FROM market_mk20_upload_waiting WHERE id = $1)`, id.String()).Scan(&yes) + if err != nil { + log.Errorw("failed to query the db for deal status", "deal", id.String(), "err", err) + return &DealStatus{ + HTTPCode: http.StatusInternalServerError, + } + } + if yes { + ret.Response.PDPV1.State = DealStateAwaitingUpload + } else { + ret.Response.PDPV1.State = DealStateProcessing + } + } else { + ret.Response.PDPV1.State = DealStateAccepted + } + } + + if pdp.CreateDataSet || pdp.DeleteDataSet || pdp.DeletePiece { + ret.Response.PDPV1.State = DealStateProcessing + } + } + + return ret + } + + // If only DDO is defined + if isDDO && !isPDP { + ret := &DealStatus{ + HTTPCode: http.StatusOK, + Response: &DealProductStatusResponse{ + DDOV1: &DealStatusResponse{ + State: DealStateAccepted, + }, + }, + } + if ddo_complete.Bool { + ret.Response.DDOV1.State = DealStateComplete + } + if ddo_error.Valid && ddo_error.String != "" { + ret.Response.DDOV1.State = DealStateFailed + ret.Response.DDOV1.ErrorMsg = ddo_error.String + } + + if !ddo_complete.Bool { + state, err := m.getDDOStatus(ctx, id) + if err != nil { + log.Errorw("failed to get DDO status", "deal", id.String(), "error", err) + return &DealStatus{ + HTTPCode: http.StatusInternalServerError, + } + } + ret.Response.DDOV1.State = state + } + + return ret + } + + // If both PDP and DDO are defined + if isPDP && isDDO { + ret := &DealStatus{ + HTTPCode: http.StatusOK, + } + + if pdp_complete.Bool { + ret.Response.PDPV1.State = DealStateComplete + } + + if pdp_error.Valid { + ret.Response.PDPV1.State = DealStateFailed + ret.Response.PDPV1.ErrorMsg = pdp_error.String + } + + if ddo_complete.Bool { + ret.Response.DDOV1.State = DealStateComplete + } + + if ddo_error.Valid && ddo_error.String != "" { + ret.Response.DDOV1.State = DealStateFailed + ret.Response.DDOV1.ErrorMsg = ddo_error.String + } + + if !pdp_complete.Bool { + pdp := deal.Products.PDPV1 + if pdp.AddPiece { + if deal.Data != nil { + // Check if deal is uploaded + var yes bool + err = m.DB.QueryRow(ctx, `SELECT EXISTS (SELECT 1 FROM market_mk20_upload_waiting WHERE id = $1)`, id.String()).Scan(&yes) + if err != nil { + log.Errorw("failed to query the db for deal status", "deal", id.String(), "err", err) + return &DealStatus{ + HTTPCode: http.StatusInternalServerError, + } + } + if yes { + ret.Response.PDPV1.State = DealStateAwaitingUpload + } else { + ret.Response.PDPV1.State = DealStateProcessing + } + } else { + ret.Response.PDPV1.State = DealStateAccepted + } + } + + if pdp.CreateDataSet || pdp.DeleteDataSet || pdp.DeletePiece { + ret.Response.PDPV1.State = DealStateProcessing + } + } + + if !ddo_complete.Bool { + state, err := m.getDDOStatus(ctx, id) + if err != nil { + log.Errorw("failed to get DDO status", "deal", id.String(), "error", err) + return &DealStatus{ + HTTPCode: http.StatusInternalServerError, + } + } + ret.Response.DDOV1.State = state + } + + return ret + } + + return &DealStatus{ + HTTPCode: http.StatusInternalServerError, + } + +} + +func (m *MK20) getDDOStatus(ctx context.Context, id ulid.ULID) (DealState, error) { + var waitingForPipeline bool + err := m.DB.QueryRow(ctx, `SELECT EXISTS (SELECT 1 FROM market_mk20_pipeline_waiting WHERE id = $1)`, id.String()).Scan(&waitingForPipeline) + if err != nil { + return DealStateAccepted, err + } + if waitingForPipeline { + return DealStateAccepted, nil + } + + var pdeals []struct { + Sector *int `db:"sector"` + Sealed bool `db:"sealed"` + Indexed bool `db:"indexed"` + } + + err = m.DB.Select(ctx, &pdeals, `SELECT + sector, + sealed, + indexed + FROM + market_mk20_pipeline + WHERE + id = $1`, id.String()) + + if err != nil { + return DealStateAccepted, err + } + + if len(pdeals) > 1 { + return DealStateProcessing, nil + } + + // If deal is still in pipeline + if len(pdeals) == 1 { + pdeal := pdeals[0] + if pdeal.Sector == nil { + return DealStateProcessing, nil + } + if !pdeal.Sealed { + return DealStateSealing, nil + } + if !pdeal.Indexed { + return DealStateIndexing, nil + } + } + + return DealStateComplete, nil +} + +// Supported retrieves and returns maps of product names and data source names with their enabled status, or an error if the query fails. +func (m *MK20) Supported(ctx context.Context) (map[string]bool, map[string]bool, error) { + var products []struct { + Name string `db:"name"` + Enabled bool `db:"enabled"` + } + err := m.DB.Select(ctx, &products, `SELECT name, enabled FROM market_mk20_products`) + if err != nil { + return nil, nil, err + } + + productsMap := make(map[string]bool) + + for _, product := range products { + productsMap[product.Name] = product.Enabled + } + + var sources []struct { + Name string `db:"name"` + Enabled bool `db:"enabled"` + } + err = m.DB.Select(ctx, &sources, `SELECT name, enabled FROM market_mk20_data_source`) + if err != nil { + return nil, nil, err + } + sourcesMap := make(map[string]bool) + for _, source := range sources { + sourcesMap[source.Name] = source.Enabled + } + return productsMap, sourcesMap, nil +} + +type TimeoutLimitReader struct { + r io.Reader + timeout time.Duration + totalBytes int64 +} + +func NewTimeoutLimitReader(r io.Reader, timeout time.Duration) *TimeoutLimitReader { + return &TimeoutLimitReader{ + r: r, + timeout: timeout, + totalBytes: 0, + } +} + +const UploadSizeLimit = int64(1 * 1024 * 1024 * 1024) + +func (t *TimeoutLimitReader) Read(p []byte) (int, error) { + deadline := time.Now().Add(t.timeout) + for { + // Attempt to read + n, err := t.r.Read(p) + if t.totalBytes+int64(n) > UploadSizeLimit { + return 0, fmt.Errorf("upload size limit exceeded: %d bytes", UploadSizeLimit) + } else { + t.totalBytes += int64(n) + } + + if err != nil { + return n, err + } + + if n > 0 { + // Otherwise return bytes read and no error + return n, err + } + + // Timeout: If we hit the deadline without making progress, return a timeout error + if time.Now().After(deadline) { + return 0, fmt.Errorf("upload timeout: no progress (duration: %f Seconds)", t.timeout.Seconds()) + } + + // Avoid tight loop by adding a tiny sleep + time.Sleep(100 * time.Millisecond) // Small pause to avoid busy-waiting + } +} diff --git a/market/mk20/mk20gen/gen.go b/market/mk20/mk20gen/gen.go new file mode 100644 index 000000000..b4a11385a --- /dev/null +++ b/market/mk20/mk20gen/gen.go @@ -0,0 +1,965 @@ +package main + +// +//import ( +// "bytes" +// "flag" +// "fmt" +// "go/ast" +// "go/doc" +// "go/token" +// "go/types" +// "log" +// "os" +// "sort" +// "strings" +// +// "golang.org/x/tools/go/packages" +//) +// +//// Note: This file has too many static things. Go parse package is not easy to work with and +//// is a nightmare. Wasting month[s] to build a correct parses does not seem correct use of time. +// +//type StructInfo struct { +// Name string +// Doc string +// Fields []*FieldInfo +//} +// +//type FieldInfo struct { +// Name string +// Type string +// Tag string +// Doc string +// Typ types.Type +//} +// +//type constEntry struct { +// Name string +// Value string +// Doc string +//} +// +//var visited = map[string]bool{} +//var structMap = map[string]*StructInfo{} +//var rendered = map[string]bool{} +//var constMap = map[string][]constEntry{} +// +//var skipTypes = map[string]bool{ +// "ProviderDealRejectionInfo": true, +// "DBDeal": true, +// "dbProduct": true, +// "dbDataSource": true, +// "productAndDataSource": true, +// "MK20": true, +// "DealStatus": true, +//} +// +//var includeConsts = map[string]bool{ +// "DealCode": true, +// "DealState": true, +// "UploadStatusCode": true, +// "UploadStartCode": true, +// "UploadCode": true, +//} +// +////type ParamDoc struct { +//// Name string +//// Type string +//// Optional bool +//// Comment string +////} +////type ReturnDoc struct { +//// Name string +//// Type string +//// Comment string +////} +// +////// FunctionDoc holds extracted param and return comments for a function. +////type FunctionDoc struct { +//// Params []ParamDoc +//// Returns []ReturnDoc +////} +//// +////type handlerInfo struct { +//// Path string +//// Method string +//// FuncName string +//// Calls map[string]bool +//// Types map[string]bool +//// Constants map[string]bool +//// Errors map[string]bool +//// RequestBodyType string +//// ResponseBodyType string +////} +//// +////var allHandlers = map[string]*handlerInfo{} // key = function name +//// +////var httpCodes = map[string]struct { +//// Code string +//// Msg string +////}{ +//// "http.StatusBadRequest": { +//// Code: "400", +//// Msg: "Bad Request - Invalid input or validation error", +//// }, +//// "http.StatusOK": { +//// Code: "200", +//// Msg: "OK - Success", +//// }, +//// "http.StatusInternalServerError": { +//// Code: "500", +//// Msg: "Internal Server Error", +//// }, +////} +//// +////var ( +//// paramRe = regexp.MustCompile(`@param\s+(\w+)\s+([^\s\[]+)(\s+\[optional\])?(.*)`) +//// returnRe = regexp.MustCompile(`@Return\s+(\w+)?\s*([^\s\[]+)?(.*)`) +////) +// +////func ParseFunctionDocsFromComments(pkgPath string) map[string]*FunctionDoc { +//// fset := token.NewFileSet() +//// pkgs, err := parser.ParseDir(fset, pkgPath, nil, parser.ParseComments) +//// if err != nil { +//// panic(err) +//// } +//// +//// funcDocs := map[string]*FunctionDoc{} +//// +//// for _, pkg := range pkgs { +//// for _, file := range pkg.Files { +//// for _, decl := range file.Decls { +//// fn, ok := decl.(*ast.FuncDecl) +//// if !ok || fn.Doc == nil { +//// continue +//// } +//// +//// doc := &FunctionDoc{} +//// for _, c := range fn.Doc.List { +//// txt := strings.TrimSpace(strings.TrimPrefix(c.Text, "//")) +//// if m := paramRe.FindStringSubmatch(txt); m != nil { +//// doc.Params = append(doc.Params, ParamDoc{ +//// Name: m[1], +//// Type: m[2], +//// Optional: strings.Contains(m[3], "optional"), +//// Comment: strings.TrimSpace(m[4]), +//// }) +//// } else if m := returnRe.FindStringSubmatch(txt); m != nil { +//// doc.Returns = append(doc.Returns, ReturnDoc{ +//// Name: m[1], +//// Type: m[2], +//// Comment: strings.TrimSpace(m[3]), +//// }) +//// } +//// } +//// +//// if len(doc.Params) > 0 || len(doc.Returns) > 0 { +//// funcDocs[fn.Name.Name] = doc +//// } +//// } +//// } +//// } +//// return funcDocs +////} +// +//func main() { +// var pkgPath, output string +// flag.StringVar(&pkgPath, "pkg", "./", "Package to scan") +// flag.StringVar(&output, "output", "info.md", "Output file") +// flag.Parse() +// +// //pkgPath := "/Users/lexluthr/github/filecoin-project/curio/market/mk20" +// //routerFile := filepath.Join(pkgPath, "http", "http.go") +// +// cfg := &packages.Config{ +// Mode: packages.NeedSyntax | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedDeps | packages.NeedImports | packages.NeedName | packages.NeedFiles | packages.LoadAllSyntax, +// Fset: token.NewFileSet(), +// } +// +// pkgs, err := packages.Load(cfg, pkgPath) +// if err != nil { +// log.Fatalf("Failed to load package: %v", err) +// } +// +// for _, pkg := range pkgs { +// docPkg, err := doc.NewFromFiles(cfg.Fset, pkg.Syntax, pkg.PkgPath) +// if err != nil { +// log.Fatalf("Failed to parse package: %v", err) +// } +// for _, t := range docPkg.Types { +// if st, ok := t.Decl.Specs[0].(*ast.TypeSpec); ok { +// if structType, ok := st.Type.(*ast.StructType); ok { +// name := st.Name.Name +// if visited[name] || skipTypes[name] { +// continue +// } +// visited[name] = true +// collectStruct(pkg, name, structType, t.Doc) +// } +// } +// } +// for _, file := range pkg.Syntax { +// for _, decl := range file.Decls { +// genDecl, ok := decl.(*ast.GenDecl) +// if !ok || genDecl.Tok != token.CONST { +// continue +// } +// +// for _, spec := range genDecl.Specs { +// vspec := spec.(*ast.ValueSpec) +// for _, name := range vspec.Names { +// obj := pkg.TypesInfo.Defs[name] +// if obj == nil { +// continue +// } +// typ := obj.Type().String() // e.g., "main.ErrCode" +// parts := strings.Split(typ, ".") +// typeName := parts[len(parts)-1] // just "ErrCode" +// +// if !includeConsts[typeName] { +// continue +// } +// +// if !rendered[typeName] { +// constMap[typeName] = []constEntry{} +// rendered[typeName] = true +// } +// +// val := "" +// if con, ok := obj.(*types.Const); ok { +// val = con.Val().ExactString() +// } +// cdoc := strings.TrimSpace(vspec.Doc.Text()) +// constMap[typeName] = append(constMap[typeName], constEntry{ +// Name: name.Name, +// Value: val, +// Doc: cdoc, +// }) +// } +// } +// } +// } +// } +// +// //fm := ParseFunctionDocsFromComments(pkgPath) +// //for fname, doc := range fm { +// // fmt.Printf("Function: %s\n", fname) +// // if len(doc.Params) > 0 { +// // fmt.Println(" Params:") +// // for _, p := range doc.Params { +// // fmt.Printf(" - %s %s", p.Name, p.Type) +// // if p.Optional { +// // fmt.Print(" (optional)") +// // } +// // if p.Comment != "" { +// // fmt.Printf(" -- %s", p.Comment) +// // } +// // fmt.Println() +// // } +// // } +// // if len(doc.Returns) > 0 { +// // fmt.Println(" Returns:") +// // for _, r := range doc.Returns { +// // fmt.Printf(" - Name: %s Type: %s", r.Name, r.Type) +// // if r.Comment != "" { +// // fmt.Printf(" -- Comment: %s", r.Comment) +// // } +// // fmt.Println() +// // } +// // } +// //} +// +// writeOutput(output) +// //parseMux(routerFile) +// //fmt.Println("Done tracing handlers") +// //parseHandlerBodies(routerFile) +// //fmt.Println("Done parsing handler bodies") +// //for k, v := range allHandlers { +// // fmt.Println("------------------") +// // fmt.Println("Name:", k) +// // fmt.Println("Path:", v.Path) +// // fmt.Println("Method:", v.Method) +// // fmt.Println("Constants", v.Constants) +// // fmt.Println("Calls:", v.Calls) +// // fmt.Println("Types:", v.Types) +// // fmt.Println("RequestBody", v.RequestBodyType) +// // fmt.Println("ResponseBody", v.ResponseBodyType) +// // fmt.Println("------------------") +// //} +// //fmt.Println("----------------") +// //fmt.Println("----------------") +// //for k, v := range constMap { +// // fmt.Println("Name:", k) +// // for _, e := range v { +// // fmt.Printf(" - %s: %s\n", e.Name, e.Value) +// // } +// //} +// //fmt.Println("----------------") +// //fmt.Println("----------------") +// //for _, h := range allHandlers { +// // fmt.Printf("%s %s\n", h.Method, h.Path) +// // // Optional: print summary from docs if available +// // // Parameters +// // mainCall := "" +// // for call := range h.Calls { +// // if strings.HasPrefix(call, "mk20.") { +// // mainCall = strings.TrimPrefix(call, "mk20.") +// // break +// // } +// // } +// // if mainCall != "" { +// // fmt.Println("### Parameters") +// // if doc, ok := fm[mainCall]; ok { +// // for _, param := range doc.Params { +// // fmt.Printf("- %s (%s)%s\n", param.Name, param.Type, +// // func() string { +// // if param.Optional { +// // return " [optional]" +// // } +// // return "" +// // }()) +// // } +// // } else if len(h.Types) > 0 { +// // // fallback: print type +// // for typ := range h.Types { +// // fmt.Printf("- body (%s)\n", typ) +// // } +// // } +// // } +// // // Responses +// // fmt.Println("### Possible Responses") +// // for code := range h.Constants { +// // switch code { +// // case "http.StatusBadRequest": +// // fmt.Println("- 400 Bad Request: Invalid input or validation error.") +// // case "http.StatusOK": +// // fmt.Println("- 200 OK: Success.") +// // case "http.StatusInternalServerError": +// // fmt.Println("- 500 Internal Server Error.") +// // // ... add more as needed +// // default: +// // fmt.Printf("- %s\n", code) +// // } +// // } +// // fmt.Println() +// //} +// +// //formatHandlerDocs(fm) +// //generateSwaggoComments(fm) +// +//} +// +////func extractPathParams(path string) []string { +//// var out []string +//// for _, part := range strings.Split(path, "/") { +//// if strings.HasPrefix(part, "{") && strings.HasSuffix(part, "}") { +//// out = append(out, strings.TrimSuffix(strings.TrimPrefix(part, "{"), "}")) +//// } +//// } +//// return out +////} +// +////func generateSwaggoComments(funcDocs map[string]*FunctionDoc) { +//// for _, h := range allHandlers { +//// fmt.Printf("// @Router %s [%s]\n", h.Path, strings.ToLower(h.Method)) +//// +//// // Path parameters from {id}, {chunkNum}, etc. +//// for _, param := range extractPathParams(h.Path) { +//// fmt.Printf("// @Param %s path string true \"%s\"\n", param, param) +//// } +//// +//// // Request body +//// if h.RequestBodyType != "" { +//// fmt.Println("// @accepts json") +//// fmt.Printf("// @Param body body %s true\n", h.RequestBodyType) +//// fmt.Println("// @Accept json\n// @Produce json") +//// } else if h.Method == "PUT" { +//// fmt.Println("// @accepts bytes") +//// fmt.Printf("// @Param body body []byte true \"raw binary\"\n") +//// } +//// +//// // Figure out function called like mk20.Something +//// var mk20Call string +//// for call := range h.Calls { +//// if strings.HasPrefix(call, "mk20.") { +//// mk20Call = strings.TrimPrefix(call, "mk20.") +//// break +//// } +//// } +//// +//// // Return codes (Swagger `@Success` / `@Failure`) +//// hasReturn := false +//// if doc, ok := funcDocs[mk20Call]; ok { +//// for _, ret := range doc.Returns { +//// key := strings.TrimPrefix(ret.Name, "*") +//// key = strings.TrimPrefix(key, "mk20.") +//// if entries, ok := constMap[key]; ok { +//// for _, entry := range entries { +//// msg := strings.TrimSuffix(entry.Doc, ".") +//// tag := "@Failure" +//// if strings.HasPrefix(fmt.Sprintf("%d", entry.Value), "2") { +//// tag = "@Success" +//// hasReturn = true +//// } else { +//// fmt.Printf("// %s %s {object} %s \"%s\"\n", tag, entry.Value, key, msg) +//// } +//// } +//// } +//// } +//// // Fallback to direct http constants if nothing above +//// for k := range h.Constants { +//// if msg, ok := httpCodes[k]; ok { +//// tag := "@Failure" +//// if strings.HasPrefix(fmt.Sprintf("%d", msg.Code), "2") { +//// tag = "@Success" +//// hasReturn = true +//// } else { +//// fmt.Printf("// %s %s {string} string \"%s\"\n", tag, msg.Code, msg.Msg) +//// } +//// +//// } +//// } +//// +//// // If known response type +//// if h.ResponseBodyType != "" && hasReturn { +//// fmt.Println("// @produce json") +//// fmt.Printf("// @Success 200 {object} %s\n", h.ResponseBodyType) +//// } +//// } else { +//// // Fallback to direct http constants if nothing above +//// for k := range h.Constants { +//// if msg, ok := httpCodes[k]; ok { +//// tag := "@Failure" +//// if strings.HasPrefix(fmt.Sprintf("%d", msg.Code), "2") { +//// tag = "@Success" +//// hasReturn = true +//// } else { +//// fmt.Printf("// %s %s {string} string \"%s\"\n", tag, msg.Code, msg.Msg) +//// } +//// //fmt.Printf("// %s %s {string} string \"%s\"\n", tag, msg.Code, msg.Msg) +//// } +//// } +//// +//// // If known response type +//// if h.ResponseBodyType != "" && hasReturn { +//// fmt.Println("// @produce json") +//// fmt.Printf("// @Success 200 {object} %s\n", h.ResponseBodyType) +//// } +//// } +//// +//// fmt.Println() +//// } +////} +//// +////func formatHandlerDocs(funcDocs map[string]*FunctionDoc) { +//// for _, h := range allHandlers { +//// fmt.Printf("%s %s\n", h.Method, h.Path) +//// +//// // 1. Find the mk20 call +//// var mk20Call string +//// for call := range h.Calls { +//// if strings.HasPrefix(call, "mk20.") { +//// mk20Call = strings.TrimPrefix(call, "mk20.") +//// //fmt.Println("mk20Call: ", mk20Call) +//// break +//// } +//// } +//// +//// // 2. Look up params and returns +//// doc, ok := funcDocs[mk20Call] +//// if ok { +//// if h.RequestBodyType != "" { +//// fmt.Printf("### Request Body\n- %s\n", h.RequestBodyType) +//// } +//// if h.RequestBodyType == "" && h.Method == "PUT" { +//// fmt.Printf("### Request Body\n- bytes\n") +//// } +//// +//// // 3. Lookup constMap based on return types +//// fmt.Println("### Possible Responses") +//// for _, ret := range doc.Returns { +//// key := strings.TrimPrefix(ret.Name, "*") +//// key = strings.TrimPrefix(key, "mk20.") +//// if entries, ok := constMap[key]; ok { +//// for _, entry := range entries { +//// comment := entry.Doc +//// comment = strings.TrimSuffix(comment, ".") +//// if comment == "" { +//// fmt.Printf("- %s: %s\n", entry.Value, entry.Name) +//// } else { +//// fmt.Printf("- %s: %s - %s\n", entry.Value, entry.Name, comment) +//// } +//// } +//// } +//// } +//// for k, _ := range h.Constants { +//// fmt.Printf("- %s\n", httpCodes[k]) +//// } +//// if h.ResponseBodyType != "" { +//// fmt.Printf("### Response Body\n- %s\n", h.ResponseBodyType) +//// } +//// } else { +//// //fmt.Println("### Parameters") +//// if h.RequestBodyType != "" { +//// fmt.Printf("### Request Body\n- %s\n", h.RequestBodyType) +//// } +//// if h.RequestBodyType == "" && h.Method == "PUT" { +//// fmt.Printf("### Request Body\n- bytes\n") +//// } +//// fmt.Println("### Possible Responses") +//// for k, _ := range h.Constants { +//// fmt.Printf("- %s\n", httpCodes[k]) +//// } +//// if h.ResponseBodyType != "" { +//// fmt.Printf("### Response Body\n- %s\n", h.ResponseBodyType) +//// } +//// } +//// fmt.Println() +//// } +////} +// +//func collectStruct(pkg *packages.Package, name string, structType *ast.StructType, docText string) { +// info := StructInfo{ +// Name: name, +// Doc: strings.TrimSpace(docText), +// } +// +// for _, field := range structType.Fields.List { +// var fieldName string +// if len(field.Names) > 0 { +// fieldName = field.Names[0].Name +// } else { +// fieldName = fmt.Sprintf("%s", field.Type) +// } +// +// var fieldType string +// var typ types.Type +// if t := pkg.TypesInfo.TypeOf(field.Type); t != nil { +// typ = t +// fieldType = types.TypeString(t, func(p *types.Package) string { +// return p.Name() +// }) +// } +// +// fieldTag := "" +// if field.Tag != nil { +// fieldTag = field.Tag.Value +// } +// +// var fieldDoc string +// if field.Doc != nil { +// lines := strings.Split(field.Doc.Text(), "\n") +// for i := range lines { +// lines[i] = strings.TrimSpace(lines[i]) +// } +// fieldDoc = strings.Join(lines, " ") +// } +// +// info.Fields = append(info.Fields, &FieldInfo{ +// Name: fieldName, +// Type: fieldType, +// Tag: fieldTag, +// Doc: fieldDoc, +// Typ: typ, +// }) +// +// baseType := fieldType +// +// baseType = strings.TrimPrefix(baseType, "*") +// +// baseType = strings.TrimPrefix(baseType, "[]") +// baseType = strings.Split(baseType, ".")[0] +// if skipTypes[baseType] { +// continue +// } +// if !visited[baseType] { +// visited[baseType] = true +// collectFromImports(baseType) +// } +// } +// +// structMap[name] = &info +//} +// +//func collectFromImports(typeName string) { +// // future: support nested imports with doc.New(...) +//} +// +//func writeOutput(path string) { +// var buf bytes.Buffer +// +// buf.WriteString("# Storage Market Interface\n\n") +// buf.WriteString("This document describes the storage market types and supported HTTP methods for making deals with Curio Storage Provider.\n\n") +// +// buf.WriteString("## \U0001F4E1 MK20 HTTP API Overview\n\n") +// buf.WriteString("The MK20 storage market module provides a set of HTTP endpoints under `/market/mk20` that allow clients to submit, track, and finalize storage deals with storage providers. This section documents all available routes and their expected behavior.\n\n") +// +// buf.WriteString("### Base URL\n\n" + +// "The base URL for all MK20 endpoints is: \n\n" + +// "```\n\n/market/mk20\n\n```" + +// "\n\n") +// +// buf.WriteString("### 🔄 POST /store\n\n") +// buf.WriteString("Submit a new MK20 deal.\n\n") +// buf.WriteString("- **Content-Type**: N/A\n") +// buf.WriteString("- **Body**: N/A\n") +// buf.WriteString("- **Query Parameters**: N/A\n") +// buf.WriteString("- **Response**:\n") +// buf.WriteString(" - `200 OK`: Deal accepted\n") +// buf.WriteString(" - Other [HTTP codes](#constants-for-errorcode) indicate validation failure, rejection, or system errors\n\n") +// +// buf.WriteString("### 🧾 GET /status?id=\n\n") +// buf.WriteString("Retrieve the current status of a deal.\n\n") +// buf.WriteString("- **Content-Type**: `application/json`\n") +// buf.WriteString("- **Body**: N/A\n") +// buf.WriteString("- **Query Parameters**:\n") +// buf.WriteString(" - `id`: Deal identifier in [ULID](https://github.com/ulid/spec) format\n") +// buf.WriteString("- **Response**:\n") +// buf.WriteString(" - `200 OK`: JSON-encoded [deal status](#dealstatusresponse) information\n") +// buf.WriteString(" - `400 Bad Request`: Missing or invalid ID\n") +// buf.WriteString(" - `500 Internal Server Error`: If backend fails to respond\n\n") +// +// buf.WriteString("### 📜 GET /contracts\n\n") +// buf.WriteString("- **Content-Type**: N/A\n") +// buf.WriteString("- **Body**: N/A\n") +// buf.WriteString("- **Query Parameters**: N/A\n") +// buf.WriteString("Return the list of contract addresses supported by the provider.\n\n") +// buf.WriteString("- **Response**:\n") +// buf.WriteString(" - `200 OK`: [JSON array of contract addresses](#supportedcontracts)\n") +// buf.WriteString(" - `500 Internal Server Error`: Query or serialization failure\n\n") +// +// buf.WriteString("### 🗂 PUT /data?id=\n\n") +// buf.WriteString("Upload deal data after the deal has been accepted.\n\n") +// buf.WriteString("- **Content-Type**: `application/octet-stream`\n") +// buf.WriteString("- **Body**: Deal data bytes\n") +// buf.WriteString("- **Query Parameter**:\n -`id`: Deal identifier in [ULID](https://github.com/ulid/spec) format\n") +// buf.WriteString("- **Headers**:\n") +// buf.WriteString(" - `Content-Length`: must be deal's raw size\n") +// buf.WriteString("- **Response**:\n") +// buf.WriteString(" - `200 OK`: if data is successfully streamed\n") +// buf.WriteString(" - `400`, `413`, or `415`: on validation failures\n\n") +// +// buf.WriteString("### 🧠 GET /info\n\n") +// buf.WriteString("- **Content-Type**: N/A\n") +// buf.WriteString("- **Body**: N/A\n") +// buf.WriteString("- **Query Parameters**: N/A\n") +// buf.WriteString("Fetch markdown-formatted documentation that describes the supported deal schema, products, and data sources.\n\n") +// buf.WriteString("- **Response**:\n") +// buf.WriteString(" - `200 OK`: with markdown content of the info file\n") +// buf.WriteString(" - `500 Internal Server Error`: if file is not found or cannot be read\n\n") +// +// buf.WriteString("### 🧰 GET /products\n\n") +// buf.WriteString("- **Content-Type**: N/A\n") +// buf.WriteString("- **Body**: N/A\n") +// buf.WriteString("- **Query Parameters**: N/A\n") +// buf.WriteString("Fetch json list of the supported products.\n\n") +// buf.WriteString("- **Response**:\n") +// buf.WriteString(" - `200 OK`: with json content\n") +// buf.WriteString(" - `500 Internal Server Error`: if info cannot be read\n\n") +// +// buf.WriteString("### 🌐 GET /sources\n\n") +// buf.WriteString("- **Content-Type**: N/A\n") +// buf.WriteString("- **Body**: N/A\n") +// buf.WriteString("- **Query Parameters**: N/A\n") +// buf.WriteString("Fetch json list of the supported data sources.\n\n") +// buf.WriteString("- **Response**:\n") +// buf.WriteString(" - `200 OK`: with json content\n") +// buf.WriteString(" - `500 Internal Server Error`: if info cannot be read\n\n") +// +// buf.WriteString("## Supported Deal Types\n\n") +// buf.WriteString("This document lists the data types and fields supported in the new storage market interface. It defines the deal structure, accepted data sources, and optional product extensions. Clients should use these definitions to format and validate their deals before submission.\n\n") +// +// ordered := []string{"Deal", "DataSource", "Products"} +// var rest []string +// for k := range structMap { +// if k != "Deal" && k != "DataSource" && k != "Products" { +// rest = append(rest, k) +// } +// } +// sort.Strings(rest) +// keys := append(ordered, rest...) +// +// for _, k := range keys { +// s, ok := structMap[k] +// if !ok { +// continue +// } +// buf.WriteString(fmt.Sprintf("### %s\n\n", s.Name)) +// if s.Doc != "" { +// buf.WriteString(s.Doc + "\n\n") +// } +// buf.WriteString("| Field | Type | Tag | Description |\n") +// buf.WriteString("|-------|------|-----|-------------|\n") +// for _, f := range s.Fields { +// typeName := f.Type +// linkTarget := "" +// +// // Strip common wrappers like pointer/star and slice +// trimmed := strings.TrimPrefix(typeName, "*") +// trimmed = strings.TrimPrefix(trimmed, "[]") +// parts := strings.Split(trimmed, ".") +// baseType := parts[len(parts)-1] +// +// if _, ok := structMap[baseType]; ok { +// linkTarget = fmt.Sprintf("[%s](#%s)", f.Type, strings.ToLower(baseType)) +// } else if _, ok := constMap[baseType]; ok { +// linkTarget = fmt.Sprintf("[%s](#constants-for-%s)", f.Type, strings.ToLower(baseType)) +// } else { +// typ := f.Typ +// if ptr, ok := typ.(*types.Pointer); ok { +// typ = ptr.Elem() +// } +// if named, ok := typ.(*types.Named); ok && named.Obj() != nil && named.Obj().Pkg() != nil { +// +// pkgPath := named.Obj().Pkg().Path() +// objName := named.Obj().Name() +// linkTarget = fmt.Sprintf("[%s](https://pkg.go.dev/%s#%s)", typeName, pkgPath, objName) +// } else if typ != nil && typ.String() == baseType { +// linkTarget = fmt.Sprintf("[%s](https://pkg.go.dev/builtin#%s)", typeName, baseType) +// } else if slice, ok := typ.(*types.Slice); ok { +// elem := slice.Elem() +// if basic, ok := elem.(*types.Basic); ok { +// linkTarget = fmt.Sprintf("[%s](https://pkg.go.dev/builtin#%s)", typeName, basic.Name()) +// } else { +// linkTarget = typeName +// } +// } else { +// linkTarget = typeName +// } +// } +// +// buf.WriteString(fmt.Sprintf("| %s | %s | %s | %s |\n", +// f.Name, linkTarget, strings.Trim(f.Tag, "`"), f.Doc)) +// } +// buf.WriteString("\n") +// } +// +// // Render constants with sort order +// for k, v := range constMap { +// if len(v) == 0 { +// continue +// } +// buf.WriteString(fmt.Sprintf("### Constants for %s\n\n", k)) +// buf.WriteString("| Constant | Code | Description |\n") +// buf.WriteString("|----------|------|-------------|\n") +// for _, c := range v { +// buf.WriteString(fmt.Sprintf("| %s | %s | %s |\n", c.Name, c.Value, c.Doc)) +// } +// buf.WriteString("\n") +// } +// +// os.Stdout.WriteString(buf.String()) +// +// err := os.WriteFile(path, buf.Bytes(), 0644) +// if err != nil { +// log.Fatalf("Failed to write output: %v", err) +// } +//} +// +////func parseMux(path string) { +//// fset := token.NewFileSet() +//// +//// node, err := parser.ParseFile(fset, path, nil, 0) +//// if err != nil { +//// log.Fatalf("Parse error: %v", err) +//// } +//// +//// ast.Inspect(node, func(n ast.Node) bool { +//// call, ok := n.(*ast.CallExpr) +//// if !ok || len(call.Args) < 2 { +//// return true +//// } +//// +//// sel, ok := call.Fun.(*ast.SelectorExpr) +//// if !ok { +//// return true +//// } +//// +//// method := sel.Sel.Name +//// var path string +//// var fnName string +//// +//// switch method { +//// case "Get", "Post", "Put", "Delete": +//// if len(call.Args) != 2 { +//// return true +//// } +//// pathLit, ok := call.Args[0].(*ast.BasicLit) +//// if !ok || pathLit.Kind != token.STRING { +//// return true +//// } +//// method = strings.ToUpper(method) +//// path = strings.Trim(pathLit.Value, "\"") +//// fnName = call.Args[1].(*ast.SelectorExpr).Sel.Name +//// +//// case "Method": +//// if len(call.Args) != 3 { +//// return true +//// } +//// methodLit, ok := call.Args[0].(*ast.BasicLit) +//// if !ok || methodLit.Kind != token.STRING { +//// return true +//// } +//// method = strings.Trim(methodLit.Value, "\"") +//// +//// pathLit, ok := call.Args[1].(*ast.BasicLit) +//// if !ok || pathLit.Kind != token.STRING { +//// return true +//// } +//// path = strings.Trim(pathLit.Value, "\"") +//// fnName = extractHandlerFunc(call.Args[2]) +//// +//// default: +//// return true +//// } +//// +//// allHandlers[fnName] = &handlerInfo{ +//// Path: path, +//// Method: method, +//// FuncName: fnName, +//// Errors: map[string]bool{}, +//// } +//// return true +//// }) +////} +// +////func extractHandlerFunc(expr ast.Expr) string { +//// call, ok := expr.(*ast.CallExpr) +//// if !ok { +//// return "unknown" +//// } +//// +//// switch fun := call.Fun.(type) { +//// case *ast.SelectorExpr: +//// if fun.Sel.Name == "TimeoutHandler" && len(call.Args) > 0 { +//// return extractHandlerFunc(call.Args[0]) +//// } +//// if fun.Sel.Name == "HandlerFunc" && len(call.Args) > 0 { +//// if sel, ok := call.Args[0].(*ast.SelectorExpr); ok { +//// return sel.Sel.Name +//// } +//// } +//// } +//// return "unknown" +////} +// +////func parseHandlerBodies(path string) { +//// fset := token.NewFileSet() +//// file, err := parser.ParseFile(fset, path, nil, parser.AllErrors|parser.ParseComments) +//// if err != nil { +//// log.Fatalf("Parse error: %v", err) +//// } +//// for _, decl := range file.Decls { +//// fn, ok := decl.(*ast.FuncDecl) +//// if !ok || fn.Body == nil { +//// continue +//// } +//// name := fn.Name.Name +//// handler, exists := allHandlers[name] +//// if !exists { +//// continue +//// } +//// +//// calls := map[string]bool{} +//// types := map[string]bool{} +//// constants := map[string]bool{} +//// var reqType string +//// var respType string +//// +//// ast.Inspect(fn.Body, func(n ast.Node) bool { +//// switch node := n.(type) { +//// case *ast.CallExpr: +//// if sel, ok := node.Fun.(*ast.SelectorExpr); ok { +//// // http.WriteHeader or http.Error +//// if ident, ok := sel.X.(*ast.Ident); ok && ident.Name == "http" { +//// if sel.Sel.Name == "WriteHeader" || sel.Sel.Name == "Error" { +//// calls["http."+sel.Sel.Name] = true +//// } +//// } +//// // mdh.dm.MK20Handler. +//// if x1, ok := sel.X.(*ast.SelectorExpr); ok { +//// if x2, ok := x1.X.(*ast.SelectorExpr); ok { +//// if x2.X.(*ast.Ident).Name == "mdh" && +//// x2.Sel.Name == "dm" && +//// x1.Sel.Name == "MK20Handler" { +//// calls["mk20."+sel.Sel.Name] = true +//// } +//// } +//// } +//// // Detect json.Unmarshal(b, &type) +//// if sel.Sel.Name == "Unmarshal" && len(node.Args) == 2 { +//// if unary, ok := node.Args[1].(*ast.UnaryExpr); ok { +//// if ident, ok := unary.X.(*ast.Ident); ok { +//// reqType = findVarType(fn, ident.Name) +//// } +//// } +//// } +//// // Detect json.Marshal(type) +//// if sel.Sel.Name == "Marshal" && len(node.Args) == 1 { +//// if ident, ok := node.Args[0].(*ast.Ident); ok { +//// respType = findVarType(fn, ident.Name) +//// } +//// } +//// } +//// case *ast.AssignStmt: +//// for _, rhs := range node.Rhs { +//// if ce, ok := rhs.(*ast.CallExpr); ok { +//// if sel, ok := ce.Fun.(*ast.SelectorExpr); ok { +//// if ident, ok := sel.X.(*ast.Ident); ok && ident.Name == "http" { +//// if strings.HasPrefix(sel.Sel.Name, "Status") { +//// constants["http."+sel.Sel.Name] = true +//// } +//// } +//// } +//// } +//// } +//// case *ast.ValueSpec: +//// if node.Type != nil { +//// if se, ok := node.Type.(*ast.SelectorExpr); ok { +//// if ident, ok := se.X.(*ast.Ident); ok && ident.Name == "mk20" { +//// types["mk20."+se.Sel.Name] = true +//// } +//// } +//// } +//// case *ast.SelectorExpr: +//// if ident, ok := node.X.(*ast.Ident); ok && ident.Name == "http" { +//// if strings.HasPrefix(node.Sel.Name, "Status") { +//// constants["http."+node.Sel.Name] = true +//// } +//// } +//// } +//// return true +//// }) +//// +//// handler.Calls = calls +//// handler.Types = types +//// handler.Constants = constants +//// handler.RequestBodyType = reqType +//// handler.ResponseBodyType = respType +//// } +////} +//// +////// Helper to find type of variable declared in function scope +////func findVarType(fn *ast.FuncDecl, name string) string { +//// for _, stmt := range fn.Body.List { +//// if ds, ok := stmt.(*ast.DeclStmt); ok { +//// if gd, ok := ds.Decl.(*ast.GenDecl); ok { +//// for _, spec := range gd.Specs { +//// if vs, ok := spec.(*ast.ValueSpec); ok { +//// for _, ident := range vs.Names { +//// if ident.Name == name { +//// if se, ok := vs.Type.(*ast.SelectorExpr); ok { +//// if x, ok := se.X.(*ast.Ident); ok { +//// return x.Name + "." + se.Sel.Name +//// } +//// } +//// } +//// } +//// } +//// } +//// } +//// } +//// } +//// return "" +////} diff --git a/market/mk20/pdp_v1.go b/market/mk20/pdp_v1.go new file mode 100644 index 000000000..242035e23 --- /dev/null +++ b/market/mk20/pdp_v1.go @@ -0,0 +1,149 @@ +package mk20 + +import ( + "context" + + "github.com/ethereum/go-ethereum/common" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" +) + +// PDPV1 represents configuration for product-specific PDP version 1 deals. +type PDPV1 struct { + // CreateDataSet indicated that this deal is meant to create a new DataSet for the client by storage provider. + CreateDataSet bool `json:"create_data_set"` + + // DeleteDataSet indicated that this deal is meant to delete an existing DataSet created by SP for the client. + // DataSetID must be defined. + DeleteDataSet bool `json:"delete_data_set"` + + // AddPiece indicated that this deal is meant to add Piece to a given DataSet. DataSetID must be defined. + AddPiece bool `json:"add_piece"` + + // DeletePiece indicates whether the Piece of the data should be deleted. DataSetID must be defined. + DeletePiece bool `json:"delete_piece"` + + // DataSetID is PDP verified contract dataset ID. It must be defined for all deals except when CreateDataSet is true. + DataSetID *uint64 `json:"data_set_id,omitempty"` + + // RecordKeeper specifies the record keeper contract address for the new PDP dataset. + RecordKeeper string `json:"record_keeper"` + + // PieceIDs is a list of Piece ids in a proof set. + PieceIDs []uint64 `json:"piece_ids,omitempty"` + + // ExtraData can be used to send additional information to service contract when Verifier action like AddRoot, DeleteRoot etc. are performed. + ExtraData []byte `json:"extra_data,omitempty"` +} + +func (p *PDPV1) Validate(db *harmonydb.DB, cfg *config.MK20Config) (DealCode, error) { + code, err := IsProductEnabled(db, p.ProductName()) + if err != nil { + return code, err + } + + if ok := p.CreateDataSet || p.DeleteDataSet || p.AddPiece || p.DeletePiece; !ok { + return ErrBadProposal, xerrors.Errorf("deal must have one of the following flags set: create_data_set, delete_data_set, add_piece, delete_piece") + } + + var existingAddress bool + + err = db.QueryRow(context.Background(), `SELECT EXISTS(SELECT 1 FROM eth_keys WHERE role = 'pdp')`).Scan(&existingAddress) + if err != nil { + return ErrServerInternalError, xerrors.Errorf("checking if pdp address exists: %w", err) + } + + if !existingAddress { + return ErrServiceMaintenance, xerrors.Errorf("pdp key not configured by storage provider") + } + + if p.CreateDataSet { + if p.DataSetID != nil { + return ErrBadProposal, xerrors.Errorf("create_proof_set cannot be set with data_set_id") + } + if p.RecordKeeper == "" { + return ErrBadProposal, xerrors.Errorf("record_keeper must be defined for create_proof_set") + } + if !common.IsHexAddress(p.RecordKeeper) { + return ErrBadProposal, xerrors.Errorf("record_keeper must be a valid address") + } + } + + // Only 1 action is allowed per deal + if btoi(p.CreateDataSet)+btoi(p.DeleteDataSet)+btoi(p.AddPiece)+btoi(p.DeletePiece) > 1 { + return ErrBadProposal, xerrors.Errorf("only one action is allowed per deal") + } + + ctx := context.Background() + + if p.DeleteDataSet { + if p.DataSetID == nil { + return ErrBadProposal, xerrors.Errorf("delete_proof_set must have data_set_id defined") + } + pid := *p.DataSetID + var exists bool + err := db.QueryRow(ctx, `SELECT EXISTS(SELECT 1 FROM pdp_data_set WHERE id = $1 AND removed = FALSE)`, pid).Scan(&exists) + if err != nil { + return ErrServerInternalError, xerrors.Errorf("checking if dataset exists: %w", err) + } + if !exists { + return ErrBadProposal, xerrors.Errorf("dataset does not exist for the client") + } + } + + if p.AddPiece { + if p.DataSetID == nil { + return ErrBadProposal, xerrors.Errorf("add_root must have data_set_id defined") + } + pid := *p.DataSetID + var exists bool + err := db.QueryRow(ctx, `SELECT EXISTS(SELECT 1 FROM pdp_data_set WHERE id = $1 AND removed = FALSE)`, pid).Scan(&exists) + if err != nil { + return ErrServerInternalError, xerrors.Errorf("checking if dataset exists: %w", err) + } + if !exists { + return ErrBadProposal, xerrors.Errorf("dataset does not exist for the client") + } + } + + if p.DeletePiece { + if p.DataSetID == nil { + return ErrBadProposal, xerrors.Errorf("delete_root must have data_set_id defined") + } + pid := *p.DataSetID + if len(p.PieceIDs) == 0 { + return ErrBadProposal, xerrors.Errorf("root_ids must be defined for delete_proof_set") + } + var exists bool + err := db.QueryRow(ctx, `SELECT COUNT(*) = cardinality($2::BIGINT[]) AS all_exist_and_active + FROM pdp_dataset_piece r + JOIN pdp_data_set s ON r.data_set_id = s.id + WHERE r.data_set_id = $1 + AND r.piece = ANY($2) + AND r.removed = FALSE + AND s.removed = FALSE;`, pid, p.PieceIDs).Scan(&exists) + if err != nil { + return ErrServerInternalError, xerrors.Errorf("checking if dataset and pieces exists: %w", err) + } + if !exists { + return ErrBadProposal, xerrors.Errorf("dataset or one of the pieces does not exist for the client") + } + } + + return Ok, nil +} + +func btoi(b bool) int { + if b { + return 1 + } + return 0 +} + +func (p *PDPV1) ProductName() ProductName { + return ProductNamePDPV1 +} + +var _ product = &PDPV1{} diff --git a/market/mk20/retrieval_v1.go b/market/mk20/retrieval_v1.go new file mode 100644 index 000000000..5fa69ac9f --- /dev/null +++ b/market/mk20/retrieval_v1.go @@ -0,0 +1,39 @@ +package mk20 + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" +) + +// RetrievalV1 defines a structure for managing retrieval settings +type RetrievalV1 struct { + // Indexing indicates if the deal is to be indexed in the provider's system to support CIDs based retrieval + Indexing bool `json:"indexing"` + + // AnnouncePayload indicates whether the payload should be announced to IPNI. + AnnouncePayload bool `json:"announce_payload"` + + // AnnouncePiece indicates whether the piece information should be announced to IPNI. + AnnouncePiece bool `json:"announce_piece"` +} + +func (r *RetrievalV1) Validate(db *harmonydb.DB, cfg *config.MK20Config) (DealCode, error) { + code, err := IsProductEnabled(db, r.ProductName()) + if err != nil { + return code, err + } + + if !r.Indexing && r.AnnouncePayload { + return ErrProductValidationFailed, xerrors.Errorf("deal cannot be announced to IPNI without indexing") + } + + return Ok, nil +} + +func (r *RetrievalV1) ProductName() ProductName { + return ProductNameRetrievalV1 +} + +var _ product = &RetrievalV1{} diff --git a/market/mk20/tsclient/.gitignore b/market/mk20/tsclient/.gitignore new file mode 100644 index 000000000..75b8228bf --- /dev/null +++ b/market/mk20/tsclient/.gitignore @@ -0,0 +1,57 @@ +# Dependencies +node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Build outputs +dist/ +generated/ + +# TypeScript +*.tsbuildinfo + +# IDE +.vscode/ +.idea/ +*.swp +*.swo + +# OS +.DS_Store +Thumbs.db + +# Logs +logs +*.log + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Coverage directory used by tools like istanbul +coverage/ + +# nyc test coverage +.nyc_output + +# Dependency directories +jspm_packages/ + +# Optional npm cache directory +.npm + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variables file +.env +.env.test diff --git a/market/mk20/tsclient/Makefile b/market/mk20/tsclient/Makefile new file mode 100644 index 000000000..2bed9ba15 --- /dev/null +++ b/market/mk20/tsclient/Makefile @@ -0,0 +1,33 @@ +.PHONY: help install generate compile build clean test + +help: ## Show this help message + @echo 'Usage: make [target]' + @echo '' + @echo 'Targets:' + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf " %-15s %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +install: ## Install dependencies + npm install + +generate: ## Generate TypeScript client from swagger files + npm run generate + +compile: ## Compile TypeScript to JavaScript + npm run compile + +build: ## Build everything (generate + compile) + npm run build + +clean: ## Clean build artifacts + npm run clean + +test: ## Run tests (placeholder for future test setup) + @echo "Tests not yet implemented" + +dev: ## Development mode - watch for changes and rebuild + @echo "Starting development mode..." + @echo "Run 'make build' to rebuild after changes" + +.PHONY: setup +setup: install generate compile ## Initial setup: install deps, generate client, and compile + @echo "Setup complete! Run 'make build' to rebuild in the future." diff --git a/market/mk20/tsclient/PROJECT_STRUCTURE.md b/market/mk20/tsclient/PROJECT_STRUCTURE.md new file mode 100644 index 000000000..a44ead9e9 --- /dev/null +++ b/market/mk20/tsclient/PROJECT_STRUCTURE.md @@ -0,0 +1,79 @@ +# Project Structure + +``` +tsclient/ +├── src/ # Source code +│ ├── index.ts # Main exports +│ └── client.ts # Custom client wrapper +├── tests/ # Test files +│ ├── setup.ts # Test configuration +│ └── client.test.ts # Client tests +├── examples/ # Usage examples +│ └── basic-usage.ts # Basic client usage +├── scripts/ # Build scripts +│ └── build.sh # Automated build script +├── generated/ # Auto-generated client (from swagger) +├── dist/ # Compiled output +├── package.json # Dependencies and scripts +├── tsconfig.json # TypeScript configuration +├── jest.config.js # Jest test configuration +├── Makefile # Build targets +├── .gitignore # Git ignore rules +├── README.md # Main documentation +└── PROJECT_STRUCTURE.md # This file +``` + +## Key Components + +### 1. Generated Client (`generated/`) +- **Source**: Generated from `../http/swagger.json` +- **Tool**: OpenAPI Generator CLI +- **Language**: TypeScript with fetch API +- **Purpose**: Provides the raw API interface + +### 2. Custom Client Wrapper (`src/client.ts`) +- **Purpose**: User-friendly interface over generated client +- **Features**: + - Simplified method names + - Better error handling + - Consistent return types + - Type safety + +### 3. Main Exports (`src/index.ts`) +- **Purpose**: Clean public API +- **Exports**: + - Generated types and client + - Custom client wrapper + - Configuration interfaces + +### 4. Build System +- **Package Manager**: npm +- **Build Tool**: TypeScript compiler +- **Code Generation**: OpenAPI Generator +- **Testing**: Jest +- **Automation**: Makefile + shell scripts + +## Build Process + +1. **Generate**: `swagger.json` → TypeScript client +2. **Compile**: TypeScript → JavaScript +3. **Package**: Output to `dist/` directory + +## Development Workflow + +1. **Setup**: `make setup` or `./scripts/build.sh` +2. **Development**: Edit source files in `src/` +3. **Regenerate**: `npm run generate` (when API changes) +4. **Build**: `npm run build` or `make build` +5. **Test**: `npm test` or `make test` + +## File Purposes + +- **`package.json`**: Dependencies, scripts, metadata +- **`tsconfig.json`**: TypeScript compiler options +- **`jest.config.js`**: Test framework configuration +- **`Makefile`**: Build automation targets +- **`build.sh`**: Automated build script +- **`.gitignore`**: Version control exclusions +- **`README.md`**: User documentation +- **`PROJECT_STRUCTURE.md`**: This file (developer reference) diff --git a/market/mk20/tsclient/README.md b/market/mk20/tsclient/README.md new file mode 100644 index 000000000..e496db0ac --- /dev/null +++ b/market/mk20/tsclient/README.md @@ -0,0 +1,498 @@ +# Curio TypeScript Market Client + +This is a TypeScript API client for the Curio storage market API. It provides a strongly-typed interface for interacting with Curio storage providers. + +## Installation + +```bash +npm install @curiostorage/market-client +``` + +## Prerequisites + +**Authentication is required** for all API operations. You must configure authentication before using the client. + +### Authentication Methods + +The client supports two authentication methods: + +1. **Ed25519** (default) - Uses Ed25519 key pairs +2. **Secp256k1** - Uses Secp256k1 key pairs (compatible with Ethereum wallets) + +### Authentication Configuration + +Authentication can be configured programmatically or via environment variables (used in examples): + +**Programmatic Configuration:** +```typescript +const authConfig = { + serverUrl: 'https://your-server.com', + clientAddr: 'f1client...', + recordKeeper: 't1000', // Required for PDPv1 deals + contractAddress: '0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6', + keyType: 'ed25519' as 'ed25519' | 'secp256k1', + publicKeyB64: 'your_base64_public_key', + privateKeyB64: 'your_base64_private_key', + // OR for secp256k1: + // secpPrivateKeyHex: 'your_hex_private_key', + // secpPrivateKeyB64: 'your_base64_private_key', +}; +``` + +**Environment Variables (for examples):** +```bash +# Used in the example scripts +export PDP_URL=https://your-server.com +export PDP_CLIENT=f1client... +export PDP_RECORD_KEEPER=t1000 +export PDP_CONTRACT=0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6 +export PDP_KEY_TYPE=ed25519 +export PDP_PUBLIC_KEY_B64=your_base64_public_key +export PDP_PRIVATE_KEY_B64=your_base64_private_key +``` + +**Running Example Scripts:** +```bash +# Example: Running step 1 with all environment variables inline +PDP_INSECURE_TLS=1 \ +PDP_URL="https://your-server.com" \ +PDP_CLIENT=f1client... \ +PDP_KEY_TYPE=secp256k1 \ +PDP_SECP_PRIVATE_KEY_B64="your_base64_private_key" \ +PDP_CONTRACT="0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6" \ +PDP_RECORD_KEEPER="0x158c8f05A616403589b99BE5d82d756860363A92" \ +DATASET_ID="01K4TKYS9302Y42BRBT0V0S389" \ +npx ts-node 1.ts +``` + +You'll need a wallet: + lotus wallet new delegated + +You can get your private key (for demo only) from: + lotus wallet export | xxd -r -p | jq -r ‘.PrivateKey’ | base64 -d | xxd -p -c 32 + +Be sure to setup Curio with: +- ui —> pdp —> ownerAddress —> (hex key) +- Your curio also needs storage attached: + -- ./curio cli storage attach -snap -seal /home/ubuntu/curiofolder + -- And a market enabled, such as taking the following with ./curio config set market.yaml +market.yaml: +[Batching] + [Batching.Commit] + Timeout = "0h0m5s" + [Batching.PreCommit] + Slack = "6h0m0s" + Timeout = "0h0m5s" + +[HTTP] + DelegateTLS = false + DomainName = "yourserver.yourdomain.com" + Enable = true + ListenAddress = "0.0.0.0:443" + +[Ingest] + MaxDealWaitTime = "0h0m30s" + +[Market] + [Market.StorageMarketConfig] + [Market.StorageMarketConfig.MK12] + ExpectedPoRepSealDuration = "0h1m0s" + ExpectedSnapSealDuration = "0h1m0s" + PublishMsgPeriod = "0h0m10s" + +[Subsystems] + EnableCommP = true + EnableDealMarket = true + EnablePDP = true + EnableParkPiece = true + + +## Building from Source + +1. Install dependencies: +```bash +npm install +``` + +2. Generate the client from swagger files: +```bash +npm run generate +``` + +3. Compile TypeScript: +```bash +npm run compile +``` + +4. Or build everything at once: +```bash +npm run build +``` + +## Usage + +```typescript +import { MarketClient, PieceCidUtils, AuthUtils } from '@curiostorage/market-client'; + +// Configure authentication programmatically +const authConfig = { + serverUrl: 'https://your-server.com', + clientAddr: 'f1client...', + recordKeeper: 't1000', // Required for PDPv1 deals + contractAddress: '0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6', + keyType: 'ed25519' as 'ed25519' | 'secp256k1', + publicKeyB64: 'your_base64_public_key', + privateKeyB64: 'your_base64_private_key', +}; + +// Build authentication header +const authHeader = await AuthUtils.buildAuthHeader(authConfig); + +// Create authenticated client +const client = new MarketClient({ + serverUrl: authConfig.serverUrl, + authHeader +}); + +// Get supported contracts +const contracts = await client.getContracts(); + +// Get supported products +const products = await client.getProducts(); + +// Get supported data sources +const sources = await client.getSources(); + +// Get deal status +const status = await client.getStatus('deal-id-here'); + +// Submit a deal +const deal = { + // ... deal configuration +}; +const result = await client.submitDeal(deal); + +// Upload data (single request - suitable for small deals) +await client.uploadData('deal-id', [1, 2, 3, 4]); + +// Chunked upload (suitable for large deals) +await client.initializeChunkedUpload('deal-id', startUploadData); +await client.uploadChunk('deal-id', '0', chunkData); +await client.uploadChunk('deal-id', '1', chunkData); +await client.finalizeChunkedUpload('deal-id'); + +// Check upload status +const uploadStatus = await client.getUploadStatus('deal-id'); + +// Compute piece CID v2 from blobs +const blobs = [new Blob(['file content'])]; +const pieceCid = await PieceCidUtils.computePieceCidV2(blobs); + +// Convenience wrappers for common workflows (includes automatic chunked upload) +const result = await client.submitPDPv1DealWithUpload({ + blobs: [new Blob(['file content'])], + client: 'f1client...', + provider: 'f1provider...', + contractAddress: '0x...' +}); + +// DDO deals with custom duration (includes automatic chunked upload) +const ddoResult = await client.submitDDOV1DealWithUpload({ + blobs: [new Blob(['file content'])], + client: 'f1client...', + provider: 'f1provider...', + contractAddress: '0x...', + // Optional lifespan (epochs); defaults to 518400 if omitted + lifespan: 600000 +}); + +// Results include upload statistics +console.log('Uploaded chunks:', result.uploadedChunks); +console.log('Uploaded bytes:', result.uploadedBytes); +``` + +## Streaming PDP (no upfront data section) + +Create a deal without a `data` section, stream data using `uploadChunk`, compute the piece CID while streaming, then finalize with the computed `data`: + +```typescript +import { Client, MarketClientConfig, AuthUtils } from '@curiostorage/market-client'; + +// Configure authentication programmatically +const authConfig = { + serverUrl: 'https://your-server.com', + clientAddr: 'f1client...', + recordKeeper: 't1000', // Required for PDPv1 deals + contractAddress: '0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6', + keyType: 'ed25519' as 'ed25519' | 'secp256k1', + publicKeyB64: 'your_base64_public_key', + privateKeyB64: 'your_base64_private_key', +}; + +const authHeader = await AuthUtils.buildAuthHeader(authConfig); + +const config: MarketClientConfig = { + serverUrl: authConfig.serverUrl, + authHeader +}; +const client = new Client(config); + +// Create the streaming helper (defaults to 1MB chunks) +const spdp = client.streamingPDP({ + client: 'f1client...', + provider: 'f1provider...', + contractAddress: '0x...', + // chunkSize: 2 * 1024 * 1024, // optional +}); + +// Begin: submits deal without data and initializes chunked upload +await spdp.begin(); + +// Stream bytes (these are uploaded as chunks and hashed for CID) +spdp.write(new TextEncoder().encode('hello ')); +spdp.write(new TextEncoder().encode('world')); + +// Commit: flushes remaining chunk, computes piece CID, and finalizes with data +const { id, pieceCid, totalSize } = await spdp.commit(); +console.log({ id, pieceCid, totalSize }); +``` + +## Product Types + +The client supports three main product types for different use cases: + +### PDPv1 (Proof of Data Possession) +Used for creating datasets and proving data possession: +```typescript +products: { + pdpV1: { + createDataSet: true, // Create new dataset + recordKeeper: 'provider-address', + pieceIds: [123, 456, 789] // Piece IDs for each individual blob + }, + retrievalV1: { + announcePayload: true, // Announce to IPNI + announcePiece: true, // Announce piece info + indexing: true // Enable retrieval + } +} +``` +then without createDataSet & with: + addPiece: true, // Add piece to dataset + +### DDOv1 (Direct Data Onboarding) +Used for direct data onboarding with contract verification: +```typescript +products: { + ddoV1: { + duration: 518400, // Typically chosen per-deal (lifespan) + provider: { address: 'provider-address' }, + contractAddress: '0x...', + contractVerifyMethod: 'verifyDeal' + }, + retrievalV1: { + announcePayload: true, + announcePiece: true, + indexing: true + } +} +``` + +### RetrievalV1 +Configures retrieval behavior and indexing: +```typescript +retrievalV1: { + announcePayload: true, // Announce payload to IPNI + announcePiece: true, // Announce piece information + indexing: true // Index for CID-based retrieval +} +``` + +## API Endpoints + +- `GET /contracts` - List supported DDO contracts +- `GET /products` - List supported products +- `GET /sources` - List supported data sources +- `GET /status/{id}` - Get deal status +- `POST /store` - Submit a new deal +- `PUT /upload/{id}` - Upload deal data (single request) +- `POST /upload/{id}` - Initialize chunked upload +- `PUT /uploads/{id}/{chunkNum}` - Upload a chunk +- `POST /uploads/finalize/{id}` - Finalize chunked upload +- `GET /uploads/{id}` - Get upload status + +## Automatic Chunked Upload + +The convenience wrappers automatically handle chunked uploads after deal submission: + +- **Automatic Processing**: After submitting a deal, all blobs are automatically uploaded in chunks +- **Configurable Chunk Size**: Uses 1MB chunks by default for optimal performance +- **Progress Tracking**: Provides detailed logging of upload progress +- **Complete Workflow**: Handles initialization, chunking, upload, and finalization +- **Upload Statistics**: Returns total chunks and bytes uploaded +- **Simple & Reliable**: Sequential uploads ensure data integrity and predictable behavior + +```typescript +const result = await client.submitPDPv1DealWithUpload({ + blobs: [blob1, blob2, blob3], + client: 'f1client...', + provider: 'f1provider...', + contractAddress: '0x...' +}); + +// The result includes upload statistics +console.log('Uploaded chunks:', result.uploadedChunks); // Total number of chunks +console.log('Uploaded bytes:', result.uploadedBytes); // Total bytes uploaded +``` + +## Piece ID Calculation + +The client automatically calculates unique piece IDs for each blob in a deal: + +- **Individual Blob Piece IDs**: Each blob gets a unique piece ID based on its content hash and size +- **Deterministic**: The same blob content will always generate the same piece ID +- **Consistent**: Both PDPv1 and DDOv1 deals use the same piece ID calculation method +- **Returned**: Piece IDs are included in the deal creation and returned by convenience wrappers + +```typescript +// Each blob gets its own piece ID +const result = await client.submitPDPv1DealWithUpload({ + blobs: [blob1, blob2, blob3], + client: 'f1client...', + provider: 'f1provider...', + contractAddress: '0x...' +}); + +console.log('Piece IDs:', result.pieceIds); // [123, 456, 789] +console.log('Blob 1 → Piece ID:', result.pieceIds[0]); // 123 +console.log('Blob 2 → Piece ID:', result.pieceIds[1]); // 456 +console.log('Blob 3 → Piece ID:', result.pieceIds[2]); // 789 +``` + +## Piece CID Computation + +The client includes utilities for computing Filecoin piece CIDs using the [js-multiformats library](https://github.com/multiformats/js-multiformats): + +### `PieceCidUtils.computePieceCidV2(blobs: Blob[])` +Computes a piece CID v2 from an array of blobs by: +1. Concatenating all blob data +2. Computing SHA2-256 hash +3. Creating a CID v1 with raw codec +4. Converting to piece CID v2 format + +### `PieceCidUtils.pieceCidV2FromV1(cid: CID, payloadSize: number)` +Converts an existing CID v1 to piece CID v2 format, supporting: +- Filecoin unsealed commitments (SHA2-256) +- Filecoin sealed commitments (Poseidon) +- Raw data codecs + +## Troubleshooting + +### Common Authentication Issues + +**Error: "REQUIRED ENVIRONMENT VARIABLE MISSING: PDP_RECORD_KEEPER"** +- This error only appears when running the example scripts +- The `PDP_RECORD_KEEPER` environment variable is required for the examples +- Set it with: `export PDP_RECORD_KEEPER=your-record-keeper-address` +- For programmatic usage, include `recordKeeper` in your auth config + +**Error: "Authentication failed" or "Invalid signature"** +- Verify your private key is correctly formatted (base64 or hex) +- Ensure the key type matches your key format (`ed25519` or `secp256k1`) +- Check that the public key corresponds to the private key + +**Error: "TLS verification failed"** +- For debugging only, you can disable TLS verification with `export PDP_INSECURE_TLS=1` +- **Warning**: Never use this in production + +**Error: "Connection refused" or "Network error"** +- Verify the `PDP_URL` is correct and accessible +- Check that the server is running and accepting connections +- Ensure firewall settings allow the connection + +### Key Generation + +**Ed25519 Key Generation:** +```bash +# Generate Ed25519 key pair +node -e " +const crypto = require('crypto'); +const keyPair = crypto.generateKeyPairSync('ed25519'); +console.log('Public key (base64):', keyPair.publicKey.export({ type: 'spki', format: 'der' }).toString('base64')); +console.log('Private key (base64):', keyPair.privateKey.export({ type: 'pkcs8', format: 'der' }).toString('base64')); +" +``` + +**Secp256k1 Key Generation:** +```bash +# Generate Secp256k1 key pair +node -e " +const crypto = require('crypto'); +const keyPair = crypto.generateKeyPairSync('ec', { namedCurve: 'secp256k1' }); +console.log('Private key (hex):', keyPair.privateKey.export({ type: 'sec1', format: 'der' }).toString('hex')); +console.log('Private key (base64):', keyPair.privateKey.export({ type: 'sec1', format: 'der' }).toString('base64')); +" +``` + +### Environment Variable Validation + +The example client validates your configuration: + +```typescript +import { AuthUtils } from '@curiostorage/market-client'; + +const authConfig = { + // ... your config +}; + +try { + const authHeader = await AuthUtils.buildAuthHeader(authConfig); + console.log('✅ Authentication configuration is valid'); +} catch (error) { + console.error('❌ Authentication configuration error:', error.message); +} +``` + +## Examples + +See the `examples/unpkg-end-to-end/` directory for a complete step-by-step workflow that demonstrates: + +- Authentication setup and configuration (using environment variables) +- Creating PDPv1 datasets +- Adding pieces to datasets +- Uploading data with chunked uploads +- Downloading pieces +- Deleting datasets and pieces + +Each step is documented and can be run independently, making it easy to understand the complete workflow. The examples use environment variables for configuration, but you can adapt the code to use programmatic configuration instead. + +**Quick Start with Examples:** +```bash +# Set your configuration +export PDP_URL="https://your-server.com" +export PDP_CLIENT="f1client..." +export PDP_RECORD_KEEPER="0x158c8f05A616403589b99BE5d82d756860363A92" +export PDP_CONTRACT="0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6" +export PDP_KEY_TYPE="secp256k1" +export PDP_SECP_PRIVATE_KEY_B64="your_base64_private_key" + +# Run the complete workflow +cd examples/unpkg-end-to-end/ +npx ts-node 1.ts # Create dataset +npx ts-node 2.ts # Add piece and upload +npx ts-node 3.ts # Download piece +npx ts-node 4.ts # Delete +``` + +## Development + +The client is generated from the OpenAPI/Swagger specification in `../http/swagger.json`. To regenerate after API changes: + +```bash +npm run generate +npm run compile +``` + +## License + +MIT diff --git a/market/mk20/tsclient/examples/piece-cid-computation.ts b/market/mk20/tsclient/examples/piece-cid-computation.ts new file mode 100644 index 000000000..585052042 --- /dev/null +++ b/market/mk20/tsclient/examples/piece-cid-computation.ts @@ -0,0 +1,132 @@ +import { CurioMarket } from '../src'; + +// Example: Compute piece CID v2 from blobs +async function computePieceCidExample() { + try { + console.log('🔍 Computing piece CID v2 from blobs...\n'); + + // Create mock blobs (in real usage, these would be actual files) + const mockBlobs = [ + new Blob(['Hello, this is file 1 content'], { type: 'text/plain' }), + new Blob(['This is file 2 with different content'], { type: 'text/plain' }), + new Blob(['And here is file 3 content'], { type: 'text/plain' }) + ]; + + console.log('📁 Input blobs:'); + mockBlobs.forEach((blob, index) => { + console.log(` File ${index + 1}: ${blob.size} bytes`); + }); + + // Compute piece CID v2 + const pieceCid = CurioMarket.calculatePieceCID(mockBlobs); + + console.log('\n✅ Piece CID v2 computed successfully!'); + console.log(`🔗 Piece CID: ${pieceCid}`); + console.log(`📊 Total size: ${mockBlobs.reduce((sum, blob) => sum + blob.size, 0)} bytes`); + + return pieceCid; + + } catch (error) { + console.error('❌ Failed to compute piece CID:', error); + throw error; + } +} + +// Example: Convert existing CID v1 to piece CID v2 +async function convertCidV1ToV2Example() { + try { + console.log('\n🔄 Converting CID v1 to piece CID v2...\n'); + + // Create a mock CID v1 (in practice, this would come from somewhere) + const { CID } = await import('multiformats/cid'); + const { sha256 } = await import('multiformats/hashes/sha2'); + + const mockData = new TextEncoder().encode('Sample data for CID computation'); + const hash = await sha256.digest(mockData); + const cidV1 = CID.create(1, 0x55, hash); // raw codec + + console.log(`📥 Input CID v1: ${cidV1.toString()}`); + console.log(`🔍 Codec: ${cidV1.code}`); + console.log(`🔍 Hash: ${cidV1.multihash.name}`); + + // Convert to piece CID v2 using the better implementation + const pieceCidV2 = CurioMarket.calculatePieceCID(mockData); + + console.log('\n✅ Conversion successful!'); + console.log(`📤 Output piece CID v2: ${pieceCidV2.toString()}`); + console.log(`🔍 Output codec: ${pieceCidV2.code}`); + console.log(`🔍 Output hash: ${pieceCidV2.multihash.name}`); + + return pieceCidV2; + + } catch (error) { + console.error('❌ Failed to convert CID:', error); + throw error; + } +} + +// Example: Handle different blob types and sizes +async function handleDifferentBlobTypesExample() { + try { + console.log('\n🎭 Handling different blob types and sizes...\n'); + + const blobs = [ + new Blob(['Small text file'], { type: 'text/plain' }), + new Blob(['Medium sized content here'], { type: 'text/plain' }), + new Blob(['Large content with many characters to make it bigger'], { type: 'text/plain' }), + new Blob(['Another file with content'], { type: 'text/plain' }) + ]; + + console.log('📁 Blob details:'); + blobs.forEach((blob, index) => { + console.log(` Blob ${index + 1}: ${blob.size} bytes, type: ${blob.type}`); + }); + + // Compute piece CID v2 + const pieceCid = await CurioMarket.PieceCidUtils.computePieceCidV2(blobs); + + console.log('\n✅ Piece CID computed for mixed blob types!'); + console.log(`🔗 Piece CID: ${pieceCid}`); + console.log(`📊 Total size: ${blobs.reduce((sum, blob) => sum + blob.size, 0)} bytes`); + + return pieceCid; + + } catch (error) { + console.error('❌ Failed to handle different blob types:', error); + throw error; + } +} + +// Example: Error handling for invalid inputs +async function errorHandlingExample() { + try { + console.log('\n⚠️ Testing error handling...\n'); + + // Test with empty blob array + try { + await CurioMarket.PieceCidUtils.computePieceCidV2([]); + console.log('❌ Should have thrown error for empty blobs'); + } catch (error) { + console.log('✅ Correctly handled empty blob array:', error.message); + } + + // Test with invalid data + try { + const invalidData = new Uint8Array(0); // Empty data + CurioMarket.calculatePieceCID(invalidData); + console.log('❌ Should have thrown error for invalid data'); + } catch (error) { + console.log('✅ Correctly handled invalid data:', error.message); + } + + } catch (error) { + console.error('❌ Error handling test failed:', error); + } +} + +export { + computePieceCidExample, + convertCidV1ToV2Example, + handleDifferentBlobTypesExample, + errorHandlingExample +}; diff --git a/market/mk20/tsclient/examples/unpkg-end-to-end/1.ts b/market/mk20/tsclient/examples/unpkg-end-to-end/1.ts new file mode 100644 index 000000000..a300bec0a --- /dev/null +++ b/market/mk20/tsclient/examples/unpkg-end-to-end/1.ts @@ -0,0 +1,94 @@ +// Step 1: Create Dataset +// This step creates a PDPv1 dataset (first part of startPDPv1DealForUpload) +// Set before running: +// PDP_URL=https://andyserver.thepianoexpress.com +// PDP_CLIENT=t1k7ctd3hvmwwjdpb2ipd3kr7n4vk3xzfvzbbdrai // client wallet +// PDP_PUBLIC_KEY_B64=base64_of_raw_public_key_32_bytes # ed25519 mode +// PDP_PRIVATE_KEY_B64=base64_of_secret_key_64_or_seed_32 # ed25519 mode +// PDP_KEY_TYPE=ed25519|secp256k1 # default ed25519 +// PDP_SECP_PRIVATE_KEY_HEX=... or PDP_SECP_PRIVATE_KEY_B64=... +// PDP_CONTRACT=0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6 + +import { getAuthConfigFromEnv, buildAuthHeader, createClient, sanitizeAuthHeader, runPreflightChecks } from './auth'; +import { CurioMarket } from '../../src'; +import { ulid } from 'ulid'; + +async function sleep(ms: number) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +async function run() { + console.log('🚀 Step 1: Creating PDPv1 Dataset'); + console.log(' This is the first step and requires no inputs.'); + + // Get configuration from environment + const config = getAuthConfigFromEnv(); + console.log('Configuration loaded from environment'); + + // Build authentication + const authHeader = await buildAuthHeader(config); + console.log('Auth header (sanitized):', sanitizeAuthHeader(authHeader)); + console.log('Server URL:', config.serverUrl); + + // Create authenticated client + const client = createClient(config, authHeader); + + // Run preflight connectivity checks + console.log('🔍 Running preflight connectivity checks...'); + await runPreflightChecks(config, authHeader); + + // Create dataset with a fresh identifier (first part of startPDPv1DealForUpload) + console.log('📝 Creating PDPv1 dataset...'); + const datasetId = ulid(); + const createDeal: CurioMarket.Deal = { + identifier: datasetId, + client: config.clientAddr, + products: { + pdpV1: { + createDataSet: true, + addPiece: false, + recordKeeper: config.recordKeeper, + extraData: [], + deleteDataSet: false, + deletePiece: false, + } as CurioMarket.PDPV1, + } as CurioMarket.Products, + } as CurioMarket.Deal; + + // Submit the dataset creation deal + console.log('📤 Submitting dataset creation deal...'); + await client.submitDeal(createDeal); + console.log(` Dataset creation deal submitted with ID: ${datasetId}`); + + // Wait for dataset creation to complete + console.log('⏳ Waiting for dataset creation to complete...'); + for (let i = 0; i < 60; i++) { // up to ~5 minutes with 5s interval + const status = await client.getStatus(datasetId); + const pdp = status.pdpV1; + const st = pdp?.status; + console.log(` Dataset status: ${st}${pdp?.errorMsg ? `, error: ${pdp.errorMsg}` : ''}`); + if (st === 'complete' || st === 'failed') break; + await sleep(5000); + } + + console.log('✅ Step 1 completed: PDPv1 dataset created'); + console.log(` - Dataset ID: ${datasetId}`); + console.log(` - Client: ${config.clientAddr}`); + console.log(` - Record Keeper: ${config.recordKeeper}`); + console.log(''); + console.log('Next: Run 2.ts to add piece to the dataset'); + + return { + datasetId, + config, + }; +} + +if (require.main === module) { + run().catch(err => { + console.error('Step 1 failed:', err); + process.exit(1); + }); +} + +export { run as startDeal }; diff --git a/market/mk20/tsclient/examples/unpkg-end-to-end/2.ts b/market/mk20/tsclient/examples/unpkg-end-to-end/2.ts new file mode 100644 index 000000000..81d0879b0 --- /dev/null +++ b/market/mk20/tsclient/examples/unpkg-end-to-end/2.ts @@ -0,0 +1,214 @@ +// Step 2: Add Piece and Upload Blobs +// This step adds a piece to the dataset, downloads React.js, and uploads it +// Set before running: +// PDP_URL=https://andyserver.thepianoexpress.com +// PDP_CLIENT=t1k7ctd3hvmwwjdpb2ipd3kr7n4vk3xzfvzbbdrai // client wallet +// PDP_PUBLIC_KEY_B64=base64_of_raw_public_key_32_bytes # ed25519 mode +// PDP_PRIVATE_KEY_B64=base64_of_secret_key_64_or_seed_32 # ed25519 mode +// PDP_KEY_TYPE=ed25519|secp256k1 # default ed25519 +// PDP_SECP_PRIVATE_KEY_HEX=... or PDP_SECP_PRIVATE_KEY_B64=... +// PDP_CONTRACT=0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6 + +import { getAuthConfigFromEnv, buildAuthHeader, createClient, sanitizeAuthHeader, runPreflightChecks } from './auth'; +import { CurioMarket } from '../../src'; +import { ulid } from 'ulid'; + +async function sleep(ms: number): Promise { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +async function run(datasetId?: string) { + console.log('📁 Step 2: Adding Piece and Uploading Blobs'); + console.log(' REQUIRED INPUT: Dataset ID from Step 1'); + console.log(' This step downloads React.js, adds piece to dataset, and uploads it'); + + // Get configuration from environment + const config = getAuthConfigFromEnv(); + console.log('Configuration loaded from environment'); + + // Build authentication + const authHeader = await buildAuthHeader(config); + console.log('Auth header (sanitized):', sanitizeAuthHeader(authHeader)); + console.log('Server URL:', config.serverUrl); + + // Create authenticated client + const client = createClient(config, authHeader); + + // Run preflight connectivity checks + console.log('🔍 Running preflight connectivity checks...'); + await runPreflightChecks(config, authHeader); + + // Use provided datasetId or get from environment + const targetDatasetId = datasetId || process.env.DATASET_ID; + if (!targetDatasetId) { + console.error('❌ REQUIRED INPUT MISSING: Dataset ID'); + console.error(' This step requires a dataset ID from Step 1.'); + console.error(' Either pass as parameter: run("dataset-id")'); + console.error(' Or set environment variable: export DATASET_ID=your-dataset-id'); + console.error(''); + console.error(' To get the dataset ID, run Step 1 first:'); + console.error(' npx ts-node 1.ts'); + throw new Error('REQUIRED INPUT MISSING: Dataset ID from Step 1'); + } + console.log(` Using dataset ID: ${targetDatasetId}`); + + // Download React.js from unpkg + console.log('📥 Downloading React.js from unpkg...'); + const url = 'https://unpkg.com/react@18.2.0/umd/react.production.min.js'; + const response = await fetch(url); + if (!response.ok) { + throw new Error(`Failed to download React.js: ${response.status} ${response.statusText}`); + } + + const bytes = new Uint8Array(await response.arrayBuffer()); + const blob = new Blob([Buffer.from(bytes)], { type: 'application/octet-stream' }); + console.log(` Downloaded React.js: ${bytes.length} bytes`); + console.log(` Blob size: ${blob.size} bytes`); + + // Compute piece CID + console.log('🔗 Computing piece CID...'); + const pieceCid = await CurioMarket.PieceCidUtils.computePieceCidV2([blob]); + console.log(` Piece CID: ${pieceCid}`); + + // Add piece with data under a new identifier (upload id) + console.log('📝 Creating add piece deal...'); + const uploadId = ulid(); + const addPieceDeal: CurioMarket.Deal = { + identifier: uploadId, + client: config.clientAddr, + data: { + pieceCid: { "/": pieceCid } as object, + format: { raw: {} }, + sourceHttpPut: {}, + } as CurioMarket.DataSource, + products: { + pdpV1: { + addPiece: true, + dataSetId: 0, // TODO: get dataset id from response (hardcoded for now) + recordKeeper: config.recordKeeper, + extraData: [], + deleteDataSet: false, + deletePiece: false, + } as CurioMarket.PDPV1, + retrievalV1: { + announcePayload: false, + announcePiece: true, + indexing: false, + } as CurioMarket.RetrievalV1, + } as CurioMarket.Products, + } as CurioMarket.Deal; + + // Submit the add piece deal + console.log('📤 Submitting add piece deal...'); + const dealId = await client.submitDeal(addPieceDeal); + console.log(` Add piece deal submitted with ID: ${uploadId}, deal ID: ${dealId}`); + + if (!addPieceComplete) { + console.log(' ⏰ Add piece status polling timed out after 60 seconds'); + console.log(' 🔗 Please check the blockchain for deal status:'); + console.log(` - Upload ID: ${uploadId}`); + console.log(` - Deal ID: ${dealId}`); + console.log(' 📝 The deal may still be processing on-chain'); + console.log(' ✅ Proceeding with blob upload (this may still work)'); + } + + // Upload the blobs + console.log('📤 Uploading blobs to the deal...'); + try { + const result = await client.uploadBlobs({ + id: uploadId, + blobs: [blob], + deal: addPieceDeal, + chunkSize: 16 * 1024 * 1024 // Use 16MB chunks (server minimum requirement) + }); + console.log(' Blobs uploaded successfully'); + console.log(` - Uploaded chunks: ${result.uploadedChunks}`); + console.log(` - Uploaded bytes: ${result.uploadedBytes}`); + console.log(` - Finalize code: ${result.finalizeCode}`); + } catch (e) { + console.error('Upload error:', (e as Error).message); + try { + const re: any = e as any; + if (re && re.response) { + const status = re.response.status; + const text = await re.response.text().catch(() => ''); + console.error('Upload error status:', status); + console.error('Upload error body:', text); + } + } catch (_) {} + throw e; + } + + // Poll deal status until complete/failed + console.log('⏳ Polling deal status until complete/failed...'); + let finalStatusComplete = false; + for (let i = 0; i < 12; i++) { // up to 60 seconds with 5s interval + try { + const status = await client.getStatus(uploadId); + const pdp = status.pdpV1; + const st = pdp?.status; + console.log(` Status: ${st}${pdp?.errorMsg ? `, error: ${pdp.errorMsg}` : ''}`); + if (st === 'complete' || st === 'failed') { + finalStatusComplete = true; + break; + } + } catch (e) { + console.log(` Status check failed (attempt ${i + 1}): ${(e as Error).message}`); + if (i === 11) { + console.log(' ⚠️ Final status polling timed out after 60 seconds'); + break; + } + } + await sleep(5000); + } + + if (!finalStatusComplete) { + console.log(' ⏰ Final status polling timed out after 60 seconds'); + console.log(' 🔗 Please check the blockchain for final deal status:'); + console.log(` - Upload ID: ${uploadId}`); + console.log(` - Deal ID: ${dealId}`); + console.log(' 📝 The deal may still be processing on-chain'); + console.log(' ✅ Step completed - check chain for final status'); + } + + // Try to get final status, but don't fail if it times out + let finalStatus = null; + try { + finalStatus = await client.getStatus(uploadId); + } catch (e) { + console.log(' ⚠️ Could not get final status - check blockchain'); + } + + console.log('✅ Step 2 completed: Piece added and blobs uploaded'); + console.log(` - Upload ID: ${uploadId}`); + console.log(` - Deal ID: ${dealId}`); + console.log(` - Piece CID: ${pieceCid}`); + console.log(` - Dataset ID: ${targetDatasetId}`); + console.log(` - File size: ${blob.size} bytes`); + if (finalStatus) { + console.log(` - Deal status: ${finalStatus.pdpV1?.status}`); + } else { + console.log(` - Deal status: Check blockchain for final status`); + } + console.log(''); + console.log('Next: Run 3.ts to download and verify the uploaded content'); + + return { + uploadId, + dealId, + pieceCid, + blob, + bytes, + addPieceDeal, + finalStatus: finalStatus?.pdpV1?.status || 'check_blockchain', + }; +} + +if (require.main === module) { + run().catch(err => { + console.error('Step 2 failed:', err); + process.exit(1); + }); +} + +export { run as addPieceAndUpload }; \ No newline at end of file diff --git a/market/mk20/tsclient/examples/unpkg-end-to-end/3.ts b/market/mk20/tsclient/examples/unpkg-end-to-end/3.ts new file mode 100644 index 000000000..59f6dec46 --- /dev/null +++ b/market/mk20/tsclient/examples/unpkg-end-to-end/3.ts @@ -0,0 +1,88 @@ +// Step 3: Download Piece +// This step downloads the piece using piece CID from step 2 +// Set before running: +// PDP_URL=https://andyserver.thepianoexpress.com +// PDP_CLIENT=t1k7ctd3hvmwwjdpb2ipd3kr7n4vk3xzfvzbbdrai // client wallet +// PDP_PUBLIC_KEY_B64=base64_of_raw_public_key_32_bytes # ed25519 mode +// PDP_PRIVATE_KEY_B64=base64_of_secret_key_64_or_seed_32 # ed25519 mode +// PDP_KEY_TYPE=ed25519|secp256k1 # default ed25519 +// PDP_SECP_PRIVATE_KEY_HEX=... or PDP_SECP_PRIVATE_KEY_B64=... +// PDP_CONTRACT=0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6 + +import { getAuthConfigFromEnv, buildAuthHeader, createClient } from './auth'; + +async function run(pieceCid?: string) { + console.log('📦 Step 3: Downloading Piece'); + console.log(' REQUIRED INPUT: Piece CID from Step 2'); + + // Get configuration from environment + const config = getAuthConfigFromEnv(); + + // Build authentication + const authHeader = await buildAuthHeader(config); + + // Create authenticated client + const client = createClient(config, authHeader); + + // Use provided pieceCid or get from environment + const targetPieceCid = pieceCid || process.env.PIECE_CID; + if (!targetPieceCid) { + console.error('❌ REQUIRED INPUT MISSING: Piece CID'); + console.error(' This step requires a piece CID from Step 2.'); + console.error(' Either pass as parameter: run("your-piece-cid")'); + console.error(' Or set environment variable: export PIECE_CID=your-piece-cid'); + console.error(''); + console.error(' To get the piece CID, run Step 2 first:'); + console.error(' npx ts-node 2.ts'); + throw new Error('REQUIRED INPUT MISSING: Piece CID from Step 2'); + } + + console.log(` Using piece CID: ${targetPieceCid}`); + + // Retrieve piece via market server + console.log('📦 Retrieving piece via market server...'); + try { + const base = config.serverUrl.replace(/\/$/, ''); + const url = `${base}/piece/${targetPieceCid}`; + console.log(` Retrieval URL: ${url}`); + + const r = await fetch(url); + console.log(` Retrieval HTTP status: ${r.status}`); + + if (r.ok) { + const retrieved = new Uint8Array(await r.arrayBuffer()); + console.log(` Retrieved ${retrieved.length} bytes`); + console.log('✅ Content retrieval: SUCCESS'); + + return { + pieceCid: targetPieceCid, + retrievedBytes: retrieved, + success: true, + }; + } else { + const errorText = await r.text().catch(() => ''); + console.log(` Retrieval failed with status ${r.status}: ${errorText}`); + return { + pieceCid: targetPieceCid, + success: false, + error: `HTTP ${r.status}: ${errorText}`, + }; + } + } catch (e) { + console.warn(' Retrieval attempt failed:', (e as Error).message); + return { + pieceCid: targetPieceCid, + success: false, + error: (e as Error).message, + }; + } +} + +if (require.main === module) { + run().catch(err => { + console.error('Step 3 failed:', err); + process.exit(1); + }); +} + +export { run as downloadPiece }; diff --git a/market/mk20/tsclient/examples/unpkg-end-to-end/4.ts b/market/mk20/tsclient/examples/unpkg-end-to-end/4.ts new file mode 100644 index 000000000..0fd967863 --- /dev/null +++ b/market/mk20/tsclient/examples/unpkg-end-to-end/4.ts @@ -0,0 +1,110 @@ +// Step 4: Delete +// This step deletes using upload ID from step 2 +// Set before running: +// PDP_URL=https://andyserver.thepianoexpress.com +// PDP_CLIENT=t1k7ctd3hvmwwjdpb2ipd3kr7n4vk3xzfvzbbdrai // client wallet +// PDP_PUBLIC_KEY_B64=base64_of_raw_public_key_32_bytes # ed25519 mode +// PDP_PRIVATE_KEY_B64=base64_of_secret_key_64_or_seed_32 # ed25519 mode +// PDP_KEY_TYPE=ed25519|secp256k1 # default ed25519 +// PDP_SECP_PRIVATE_KEY_HEX=... or PDP_SECP_PRIVATE_KEY_B64=... +// PDP_CONTRACT=0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6 + +import { getAuthConfigFromEnv, buildAuthHeader, createClient } from './auth'; +import { CurioMarket } from '../../src'; + +async function sleep(ms: number) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +async function run(uploadId?: string) { + console.log('🗑️ Step 4: Deleting Deal'); + console.log(' REQUIRED INPUT: Upload ID from Step 2'); + + // Get configuration from environment + const config = getAuthConfigFromEnv(); + + // Build authentication + const authHeader = await buildAuthHeader(config); + + // Create authenticated client + const client = createClient(config, authHeader); + + // Use provided uploadId or get from environment + const targetUploadId = uploadId || process.env.UPLOAD_ID; + if (!targetUploadId) { + console.error('❌ REQUIRED INPUT MISSING: Upload ID'); + console.error(' This step requires an upload ID from Step 2.'); + console.error(' Either pass as parameter: run("upload-id")'); + console.error(' Or set environment variable: export UPLOAD_ID=your-upload-id'); + console.error(''); + console.error(' To get the upload ID, run Step 2 first:'); + console.error(' npx ts-node 2.ts'); + throw new Error('REQUIRED INPUT MISSING: Upload ID from Step 2'); + } + + console.log(` Using upload ID: ${targetUploadId}`); + + console.log(`🗑️ Requesting deletion of upload ${targetUploadId}...`); + + // Request deletion by updating the deal with delete flags + const deleteDeal: CurioMarket.Deal = { + identifier: targetUploadId, + client: config.clientAddr, + products: { + pdpV1: { + deletePiece: true, + deleteDataSet: true, + recordKeeper: config.recordKeeper, + } as CurioMarket.PDPV1 + } as CurioMarket.Products + }; + + try { + const result = await client.updateDeal(targetUploadId, deleteDeal); + console.log(` Deletion request submitted successfully: ${result}`); + } catch (e) { + console.error('Deletion request error:', (e as Error).message); + try { + const re: any = e as any; + if (re && re.response) { + const status = re.response.status; + const text = await re.response.text().catch(() => ''); + console.error('Deletion error status:', status); + console.error('Deletion error body:', text); + } + } catch (_) {} + throw e; + } + + // Poll deal status post-deletion + console.log('⏳ Polling deal status post-deletion...'); + for (let i = 0; i < 24; i++) { // up to ~2 minutes + const status = await client.getStatus(targetUploadId); + const pdp = status.pdpV1; + const st = pdp?.status; + console.log(` Status: ${st}${pdp?.errorMsg ? `, error: ${pdp.errorMsg}` : ''}`); + if (st === 'complete' || st === 'failed') break; + await sleep(5000); + } + + const finalStatus = await client.getStatus(targetUploadId); + console.log('✅ Step 4 completed: Deal deletion finished'); + console.log(` - Deleted upload ID: ${targetUploadId}`); + console.log(` - Final status: ${finalStatus.pdpV1?.status}`); + console.log(''); + console.log('🎉 All steps completed! End-to-end workflow finished successfully.'); + + return { + deletedUploadId: targetUploadId, + finalStatus: finalStatus.pdpV1?.status, + }; +} + +if (require.main === module) { + run().catch(err => { + console.error('Step 4 failed:', err); + process.exit(1); + }); +} + +export { run as deleteDeal }; diff --git a/market/mk20/tsclient/examples/unpkg-end-to-end/README.md b/market/mk20/tsclient/examples/unpkg-end-to-end/README.md new file mode 100644 index 000000000..db9c4cf6d --- /dev/null +++ b/market/mk20/tsclient/examples/unpkg-end-to-end/README.md @@ -0,0 +1,157 @@ +# Unpkg End-to-End Example + +This folder contains a step-by-step example of the complete PDPv1 workflow, broken down into individual steps that run in order and use output from previous steps. + +## Prerequisites + +Set the following environment variables before running any step: + +```bash +# Required +export PDP_URL=https://your-server.com +export PDP_CLIENT=t1k7ctd3hvmwwjdpb2ipd3kr7n4vk3xzfvzbbdrai # client wallet +export PDP_CONTRACT=0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6 +export PDP_RECORD_KEEPER=t1000 # record keeper address (required for PDPv1) + +# For ed25519 authentication (default) +export PDP_PUBLIC_KEY_B64=base64_of_raw_public_key_32_bytes +export PDP_PRIVATE_KEY_B64=base64_of_secret_key_64_or_seed_32 +export PDP_KEY_TYPE=ed25519 + +# OR for secp256k1 authentication +export PDP_KEY_TYPE=secp256k1 +export PDP_SECP_PRIVATE_KEY_HEX=your_32_byte_hex_key +# OR +export PDP_SECP_PRIVATE_KEY_B64=your_32_byte_base64_key + +# Optional +export PDP_INSECURE_TLS=1 # Only for debugging - disables TLS verification +``` + +## Steps (Run in Order) + +### 1. Create Dataset (`1.ts`) +Creates a PDPv1 dataset (first part of `startPDPv1DealForUpload`). + +```bash +npx ts-node 1.ts +``` + +**What it does:** +- Creates a PDPv1 dataset with `createDataSet: true` +- Uses `submitDeal` API with dataset creation deal +- Waits for dataset creation to complete +- Returns dataset ID for use in step 2 + +**Output:** Dataset ID that should be passed to step 2 + +### 2. Add Piece (`2.ts`) +Adds a piece to the dataset (second part of `startPDPv1DealForUpload`). + +```bash +# Set the dataset ID from step 1 +export DATASET_ID=your_dataset_id_from_step_1 +npx ts-node 2.ts +``` + +**What it does:** +- Downloads file from unpkg to compute piece CID +- Creates add piece deal with `addPiece: true` and `dataSetId` +- Uses `submitDeal` API with add piece deal +- Waits for add piece to complete +- Returns upload ID, deal ID, and piece CID + +**Output:** Upload ID, deal ID, piece CID, and deal object for step 3 + +### 3. Upload Blobs (`3.ts`) +Uploads blobs using the deal from step 2. + +```bash +# Use the upload ID and deal from step 2 +export UPLOAD_ID=your_upload_id_from_step_2 +npx ts-node 3.ts +``` + +**What it does:** +- Uses `uploadBlobs` API with the deal from step 2 +- Uploads the file data to the deal +- Monitors upload progress until completion + +**Output:** Upload completion status + +### 4. Download Piece (`4.ts`) +Downloads the piece using piece CID from step 2. + +```bash +# Use the piece CID from step 2 +export PIECE_CID=your_piece_cid_from_step_2 +npx ts-node 4.ts +``` + +**What it does:** +- Retrieves the uploaded piece via market server +- Uses the piece CID provided from step 2 +- Verifies successful retrieval + +**Output:** Retrieved content and success status + +### 5. Delete (`5.ts`) +Deletes using upload ID from step 3. + +```bash +# Use the upload ID from step 3 +export UPLOAD_ID=your_upload_id_from_step_3 +npx ts-node 5.ts +``` + +**What it does:** +- Updates the deal with `deletePiece: true` and `deleteDataSet: true` +- Uses `updateDeal` API to request deletion +- Monitors deletion progress +- Completes the end-to-end workflow + +**Output:** Deletion confirmation and final status + +## Running the Complete Workflow + +You can run all steps in sequence, passing data between them: + +```bash +# Step 1: Create dataset +DATASET_ID=$(npx ts-node 1.ts | grep "Dataset ID:" | cut -d' ' -f3) + +# Step 2: Add piece +export DATASET_ID +UPLOAD_ID=$(npx ts-node 2.ts | grep "Upload ID:" | cut -d' ' -f3) +PIECE_CID=$(npx ts-node 2.ts | grep "Piece CID:" | cut -d' ' -f3) + +# Step 3: Upload blobs +export UPLOAD_ID +npx ts-node 3.ts + +# Step 4: Download piece +export PIECE_CID +npx ts-node 4.ts + +# Step 5: Delete +export UPLOAD_ID +npx ts-node 5.ts +``` + +## Files + +- `auth.ts` - Authentication helpers and configuration management +- `1.ts` - Create dataset step +- `2.ts` - Add piece step +- `3.ts` - Upload blobs step +- `4.ts` - Download piece step +- `5.ts` - Delete step +- `README.md` - This documentation + +## Notes + +- **Each step builds on the previous**: Steps are designed to run in order and use output from previous steps +- **Environment variables**: Use `DATASET_ID`, `UPLOAD_ID`, and `PIECE_CID` environment variables to pass data between steps +- **Matches startPDPv1DealForUpload**: Steps 1-2 replicate the internal logic of `startPDPv1DealForUpload` function +- **Real workflow**: This demonstrates the actual API calls you'd make in a production environment +- **Error handling**: All steps include comprehensive error handling and status reporting diff --git a/market/mk20/tsclient/examples/unpkg-end-to-end/auth.ts b/market/mk20/tsclient/examples/unpkg-end-to-end/auth.ts new file mode 100644 index 000000000..e825f8d40 --- /dev/null +++ b/market/mk20/tsclient/examples/unpkg-end-to-end/auth.ts @@ -0,0 +1,42 @@ +// Re-export all auth utilities from the main src module +export { + AuthConfig, + buildAuthHeader, + createClient, + sanitizeAuthHeader, + runPreflightChecks +} from '../../src/auth'; + +/** + * Get authentication configuration from environment variables + * This is the only environment-specific function that stays in examples + */ +export function getAuthConfigFromEnv(): import('../../src/auth').AuthConfig { + if (process.env.PDP_INSECURE_TLS === '1') { + // Disable TLS verification (use only for debugging!) + process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; + console.warn('WARNING: PDP_INSECURE_TLS=1 set. TLS verification disabled.'); + } + + const keyType = (process.env.PDP_KEY_TYPE || 'ed25519').toLowerCase() as 'ed25519' | 'secp256k1'; + + const recordKeeper = process.env.PDP_RECORD_KEEPER; + if (!recordKeeper) { + console.error('❌ REQUIRED ENVIRONMENT VARIABLE MISSING: PDP_RECORD_KEEPER'); + console.error(' The record keeper is required for PDPv1 deals.'); + console.error(' Set it with: export PDP_RECORD_KEEPER=your-record-keeper-address'); + throw new Error('REQUIRED ENVIRONMENT VARIABLE MISSING: PDP_RECORD_KEEPER'); + } + + return { + serverUrl: process.env.PDP_URL || 'http://localhost:8080', + clientAddr: process.env.PDP_CLIENT || 'f1client...', + recordKeeper, + contractAddress: process.env.PDP_CONTRACT || '0x0000000000000000000000000000000000000000', + keyType, + publicKeyB64: process.env.PDP_PUBLIC_KEY_B64, + privateKeyB64: process.env.PDP_PRIVATE_KEY_B64, + secpPrivateKeyHex: process.env.PDP_SECP_PRIVATE_KEY_HEX, + secpPrivateKeyB64: process.env.PDP_SECP_PRIVATE_KEY_B64, + }; +} diff --git a/market/mk20/tsclient/jest.config.js b/market/mk20/tsclient/jest.config.js new file mode 100644 index 000000000..9d7765992 --- /dev/null +++ b/market/mk20/tsclient/jest.config.js @@ -0,0 +1,20 @@ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + roots: ['/src', '/tests'], + testMatch: ['**/__tests__/**/*.ts', '**/?(*.)+(spec|test).ts'], + transform: { + '^.+\\.ts$': 'ts-jest', + }, + collectCoverageFrom: [ + 'src/**/*.ts', + '!src/**/*.d.ts', + ], + coverageDirectory: 'coverage', + coverageReporters: ['text', 'lcov', 'html'], + moduleFileExtensions: ['ts', 'js', 'json'], + setupFilesAfterEnv: ['/tests/setup.ts'], + moduleNameMapper: { + '^multiformats/(.*)$': '/tests/__mocks__/multiformats/$1', + }, +}; diff --git a/market/mk20/tsclient/openapitools.json b/market/mk20/tsclient/openapitools.json new file mode 100644 index 000000000..a82623d64 --- /dev/null +++ b/market/mk20/tsclient/openapitools.json @@ -0,0 +1,7 @@ +{ + "$schema": "./node_modules/@openapitools/openapi-generator-cli/config.schema.json", + "spaces": 2, + "generator-cli": { + "version": "7.14.0" + } +} diff --git a/market/mk20/tsclient/package-lock.json b/market/mk20/tsclient/package-lock.json new file mode 100644 index 000000000..5117d0ccc --- /dev/null +++ b/market/mk20/tsclient/package-lock.json @@ -0,0 +1,6158 @@ +{ + "name": "@curiostorage/market-client", + "version": "0.4.2", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@curiostorage/market-client", + "version": "0.4.2", + "license": "MIT", + "dependencies": { + "@glif/filecoin-address": "^4.0.0", + "@noble/secp256k1": "^2.1.0", + "@web3-storage/data-segment": "^5.3.0", + "isomorphic-fetch": "^3.0.0", + "multiformats": "^13.4.0", + "tweetnacl": "^1.0.3", + "ulid": "^3.0.1" + }, + "devDependencies": { + "@openapitools/openapi-generator-cli": "^2.7.0", + "@types/jest": "^29.0.0", + "@types/node": "^20.0.0", + "jest": "^29.0.0", + "ts-jest": "^29.0.0", + "typescript": "^5.0.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@adraffy/ens-normalize": { + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/@adraffy/ens-normalize/-/ens-normalize-1.10.1.tgz", + "integrity": "sha512-96Z2IP3mYmF1Xg2cDm8f1gWGf/HUVedQ3FMifV4kG/PQ4yEP51xDtRAEfhVNt5f/uzpNkZHwWQuUcu6D6K+Ekw==", + "license": "MIT" + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.0.tgz", + "integrity": "sha512-60X7qkglvrap8mn1lh2ebxXdZYtUcpd7gsmy9kLaBJ4i/WdY8PqTSdxyA8qraikqKQK5C1KRBKXqznrVapyNaw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.3.tgz", + "integrity": "sha512-yDBHV9kQNcr2/sUr9jghVyz9C3Y5G2zUM2H2lo+9mKv4sFgbA8s8Z9t8D1jiTkGoO/NoIfKMyKWr4s6CN23ZwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.3", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.3", + "@babel/parser": "^7.28.3", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.3", + "@babel/types": "^7.28.2", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.3.tgz", + "integrity": "sha512-3lSpxGgvnmZznmBkCRnVREPUFJv2wrv9iAoFDvADJc0ypmdOxdUtcLeBgBJ6zE0PMeTKnxeQzyk0xTBq4Ep7zw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.3", + "@babel/types": "^7.28.2", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.3.tgz", + "integrity": "sha512-PTNtvUQihsAsDHMOP5pfobP8C6CM4JWXmP8DrEIt46c3r2bf87Ua1zoqevsMo9g+tWDwgWrFP5EIxuBx5RudAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.3.tgz", + "integrity": "sha512-7+Ey1mAgYqFAx2h0RuoxcQT5+MlG3GTV0TQrgr7/ZliKsm/MNDxVVutlWaziMq7wJNAz8MTqz55XLpWvva6StA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.3.tgz", + "integrity": "sha512-7w4kZYHneL3A6NP2nxzHvT3HCZ7puDZZjFMqDpBPECub79sTtSO5CGXDkKrTQq8ksAwfD/XI2MRFX23njdDaIQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.3", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.3", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.2", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.2", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.2.tgz", + "integrity": "sha512-ruv7Ae4J5dUYULmeXw1gmb7rYRz57OWCPM57pHojnLq/3Z1CK2lNSLTCVjxVk1F/TZHwOZZrOWi0ur95BbLxNQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@borewit/text-codec": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@borewit/text-codec/-/text-codec-0.1.1.tgz", + "integrity": "sha512-5L/uBxmjaCIX5h8Z+uu+kA9BQLkc/Wl06UGR5ajNRxu+/XjonB5i8JpgFMrPj3LXTCPA0pv8yxUvbUi+QthGGA==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@glif/filecoin-address": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@glif/filecoin-address/-/filecoin-address-4.0.0.tgz", + "integrity": "sha512-NgAM/EyPzRXKw3Uz331BjqIHH1nFfP9Gs52LyjUMcHhKrDrnp5WbY63yu+rGws9q9wAMl8jCjVD5VhN+AcUBqw==", + "license": "(Apache-2.0 OR MIT)", + "dependencies": { + "blakejs": "1.2.1", + "borc": "3.0.0", + "ethers": "6.13.2", + "leb128": "0.0.5", + "uint8arrays": "3.1.0" + } + }, + "node_modules/@inquirer/external-editor": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@inquirer/external-editor/-/external-editor-1.0.1.tgz", + "integrity": "sha512-Oau4yL24d2B5IL4ma4UpbQigkVhzPDXLoqy1ggK4gnHg/stmkffJE4oOXHXF3uz0UEpywG68KcyXsyYpA1Re/Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "chardet": "^2.1.0", + "iconv-lite": "^0.6.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@ipld/dag-cbor": { + "version": "9.2.5", + "resolved": "https://registry.npmjs.org/@ipld/dag-cbor/-/dag-cbor-9.2.5.tgz", + "integrity": "sha512-84wSr4jv30biui7endhobYhXBQzQE4c/wdoWlFrKcfiwH+ofaPg8fwsM8okX9cOzkkrsAsNdDyH3ou+kiLquwQ==", + "license": "Apache-2.0 OR MIT", + "dependencies": { + "cborg": "^4.0.0", + "multiformats": "^13.1.0" + }, + "engines": { + "node": ">=16.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/@isaacs/balanced-match": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz", + "integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@isaacs/brace-expansion": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz", + "integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@isaacs/balanced-match": "^4.0.1" + }, + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.0.tgz", + "integrity": "sha512-TKY5pyBkHyADOPYlRT9Lx6F544mPl0vS5Ew7BJ45hA08Q+t3GjbueLliBWN3sMICk6+y7HdyxSzC4bWS8baBdg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/reporters/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@jest/reporters/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.30", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.30.tgz", + "integrity": "sha512-GQ7Nw5G2lTu/BtHTKfXhKHok2WGetd4XYcVKGx00SjAk8GMwgJM3zr6zORiPGuOE+/vkc90KtTosSSvaCjKb2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@lukeed/csprng": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lukeed/csprng/-/csprng-1.1.0.tgz", + "integrity": "sha512-Z7C/xXCiGWsg0KuKsHTKJxbWhpI3Vs5GwLfOean7MGyVFGqdRgBbAjOCh6u4bbjPc/8MJ2pZmK/0DLdCbivLDA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@nestjs/axios": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@nestjs/axios/-/axios-4.0.1.tgz", + "integrity": "sha512-68pFJgu+/AZbWkGu65Z3r55bTsCPlgyKaV4BSG8yUAD72q1PPuyVRgUwFv6BxdnibTUHlyxm06FmYWNC+bjN7A==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@nestjs/common": "^10.0.0 || ^11.0.0", + "axios": "^1.3.1", + "rxjs": "^7.0.0" + } + }, + "node_modules/@nestjs/common": { + "version": "11.1.6", + "resolved": "https://registry.npmjs.org/@nestjs/common/-/common-11.1.6.tgz", + "integrity": "sha512-krKwLLcFmeuKDqngG2N/RuZHCs2ycsKcxWIDgcm7i1lf3sQ0iG03ci+DsP/r3FcT/eJDFsIHnKtNta2LIi7PzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "file-type": "21.0.0", + "iterare": "1.2.1", + "load-esm": "1.0.2", + "tslib": "2.8.1", + "uid": "2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "class-transformer": ">=0.4.1", + "class-validator": ">=0.13.2", + "reflect-metadata": "^0.1.12 || ^0.2.0", + "rxjs": "^7.1.0" + }, + "peerDependenciesMeta": { + "class-transformer": { + "optional": true + }, + "class-validator": { + "optional": true + } + } + }, + "node_modules/@nestjs/core": { + "version": "11.1.6", + "resolved": "https://registry.npmjs.org/@nestjs/core/-/core-11.1.6.tgz", + "integrity": "sha512-siWX7UDgErisW18VTeJA+x+/tpNZrJewjTBsRPF3JVxuWRuAB1kRoiJcxHgln8Lb5UY9NdvklITR84DUEXD0Cg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "@nuxt/opencollective": "0.4.1", + "fast-safe-stringify": "2.1.1", + "iterare": "1.2.1", + "path-to-regexp": "8.2.0", + "tslib": "2.8.1", + "uid": "2.0.2" + }, + "engines": { + "node": ">= 20" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "@nestjs/common": "^11.0.0", + "@nestjs/microservices": "^11.0.0", + "@nestjs/platform-express": "^11.0.0", + "@nestjs/websockets": "^11.0.0", + "reflect-metadata": "^0.1.12 || ^0.2.0", + "rxjs": "^7.1.0" + }, + "peerDependenciesMeta": { + "@nestjs/microservices": { + "optional": true + }, + "@nestjs/platform-express": { + "optional": true + }, + "@nestjs/websockets": { + "optional": true + } + } + }, + "node_modules/@noble/curves": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@noble/curves/-/curves-1.2.0.tgz", + "integrity": "sha512-oYclrNgRaM9SsBUBVbb8M6DTV7ZHRTKugureoYEncY5c65HOmRzvSiTE3y5CYaPYJA/GVkrhXEoF0M3Ya9PMnw==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "1.3.2" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@noble/hashes": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.3.2.tgz", + "integrity": "sha512-MVC8EAQp7MvEcm30KWENFjgR+Mkmf+D189XJTkFIlwohU5hcBbn1ZkKq7KVTi2Hme3PMGF390DaL52beVrIihQ==", + "license": "MIT", + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@noble/secp256k1": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@noble/secp256k1/-/secp256k1-2.3.0.tgz", + "integrity": "sha512-0TQed2gcBbIrh7Ccyw+y/uZQvbJwm7Ao4scBUxqpBCcsOlZG0O4KGfjtNAy/li4W8n1xt3dxrwJ0beZ2h2G6Kw==", + "license": "MIT", + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@nuxt/opencollective": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@nuxt/opencollective/-/opencollective-0.4.1.tgz", + "integrity": "sha512-GXD3wy50qYbxCJ652bDrDzgMr3NFEkIS374+IgFQKkCvk9yiYcLvX2XDYr7UyQxf4wK0e+yqDYRubZ0DtOxnmQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "consola": "^3.2.3" + }, + "bin": { + "opencollective": "bin/opencollective.js" + }, + "engines": { + "node": "^14.18.0 || >=16.10.0", + "npm": ">=5.10.0" + } + }, + "node_modules/@nuxtjs/opencollective": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@nuxtjs/opencollective/-/opencollective-0.3.2.tgz", + "integrity": "sha512-um0xL3fO7Mf4fDxcqx9KryrB7zgRM5JSlvGN5AGkP6JLM5XEKyjeAiPbNxdXVXQ16isuAhYpvP88NgL2BGd6aA==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "consola": "^2.15.0", + "node-fetch": "^2.6.1" + }, + "bin": { + "opencollective": "bin/opencollective.js" + }, + "engines": { + "node": ">=8.0.0", + "npm": ">=5.0.0" + } + }, + "node_modules/@nuxtjs/opencollective/node_modules/consola": { + "version": "2.15.3", + "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", + "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@openapitools/openapi-generator-cli": { + "version": "2.23.1", + "resolved": "https://registry.npmjs.org/@openapitools/openapi-generator-cli/-/openapi-generator-cli-2.23.1.tgz", + "integrity": "sha512-Kd5EZqzbcIXf6KRlpUrheHMzQNRHsJWzAGrm4ncWCNhnQl+Mh6TsFcqq+hIetgiFCknWBH6cZ2f37SxPxaon4w==", + "dev": true, + "hasInstallScript": true, + "license": "Apache-2.0", + "dependencies": { + "@nestjs/axios": "4.0.1", + "@nestjs/common": "11.1.6", + "@nestjs/core": "11.1.6", + "@nuxtjs/opencollective": "0.3.2", + "axios": "1.11.0", + "chalk": "4.1.2", + "commander": "8.3.0", + "compare-versions": "4.1.4", + "concurrently": "9.2.1", + "console.table": "0.10.0", + "fs-extra": "11.3.1", + "glob": "11.0.3", + "inquirer": "8.2.7", + "proxy-agent": "6.5.0", + "reflect-metadata": "0.2.2", + "rxjs": "7.8.2", + "tslib": "2.8.1" + }, + "bin": { + "openapi-generator-cli": "main.js" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/openapi_generator" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@sovpro/delimited-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@sovpro/delimited-stream/-/delimited-stream-1.1.0.tgz", + "integrity": "sha512-kQpk267uxB19X3X2T1mvNMjyvIEonpNSHrMlK5ZaBU6aZxw7wPbpgKJOjHN3+/GPVpXgAV9soVT2oyHpLkLtyw==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@tokenizer/inflate": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.2.7.tgz", + "integrity": "sha512-MADQgmZT1eKjp06jpI2yozxaU9uVs4GzzgSL+uEq7bVcJ9V1ZXQkeGNql1fsSI0gMy1vhvNTNbUqrx+pZfJVmg==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.4.0", + "fflate": "^0.8.2", + "token-types": "^6.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@tokenizer/token": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@tokenizer/token/-/token-0.3.0.tgz", + "integrity": "sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tootallnate/quickjs-emscripten": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz", + "integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "29.5.14", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", + "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "expect": "^29.0.0", + "pretty-format": "^29.0.0" + } + }, + "node_modules/@types/node": { + "version": "20.19.11", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.11.tgz", + "integrity": "sha512-uug3FEEGv0r+jrecvUUpbY8lLisvIjg6AAic6a2bSP5OEOLeJsDSnvhCDov7ipFFMXS3orMpzlmi0ZcuGkBbow==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/yargs": { + "version": "17.0.33", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.33.tgz", + "integrity": "sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@web3-storage/data-segment": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/@web3-storage/data-segment/-/data-segment-5.3.0.tgz", + "integrity": "sha512-zFJ4m+pEKqtKatJNsFrk/2lHeFSbkXZ6KKXjBe7/2ayA9wAar7T/unewnOcZrrZTnCWmaxKsXWqdMFy9bXK9dw==", + "license": "(Apache-2.0 AND MIT)", + "dependencies": { + "@ipld/dag-cbor": "^9.2.1", + "multiformats": "^13.3.0", + "sync-multihash-sha2": "^1.0.0" + } + }, + "node_modules/aes-js": { + "version": "4.0.0-beta.5", + "resolved": "https://registry.npmjs.org/aes-js/-/aes-js-4.0.0-beta.5.tgz", + "integrity": "sha512-G965FqalsNyrPqgEGON7nIx1e/OVENSgiEIzyC63haUMuvNnwIgIjMs52hlTCKhkBny7A2ORNlfY9Zu+jmGk1Q==", + "license": "MIT" + }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/ast-types": { + "version": "0.13.4", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.13.4.tgz", + "integrity": "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==", + "dev": true, + "license": "MIT", + "dependencies": { + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/axios": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.11.0.tgz", + "integrity": "sha512-1Lx3WLFQWm3ooKDYZD1eXmoGO9fxYQjrycfHFC8P0sCfQVXyROp0p9PFWBehewBOdCwHc+f/b8I0fMto5eSfwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.4", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz", + "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" + }, + "peerDependencies": { + "@babel/core": "^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "license": "MIT", + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/basic-ftp": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.0.5.tgz", + "integrity": "sha512-4Bcg1P8xhUuqcii/S0Z9wiHIrQVPMermM1any+MX5GeGD7faD3/msQUDGLol9wOcz4/jbg/WJnGqoJF6LiBdtg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/bignumber.js": { + "version": "9.3.1", + "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", + "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/blakejs": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/blakejs/-/blakejs-1.2.1.tgz", + "integrity": "sha512-QXUSXI3QVc/gJME0dBpXrag1kbzOqCjCX8/b54ntNyW6sjtoqxqRk3LTmXzaJoh71zMsDCjM+47jS7XiwN/+fQ==", + "license": "MIT" + }, + "node_modules/bn.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.2.tgz", + "integrity": "sha512-v2YAxEmKaBLahNwE1mjp4WON6huMNeuDvagFZW+ASCuA/ku0bXR9hSMw0XpiqMoA3+rmnyck/tPRSFQkoC9Cuw==", + "license": "MIT" + }, + "node_modules/borc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/borc/-/borc-3.0.0.tgz", + "integrity": "sha512-ec4JmVC46kE0+layfnwM3l15O70MlFiEbmQHY/vpqIKiUtPVntv4BY4NVnz3N4vb21edV3mY97XVckFvYHWF9g==", + "license": "MIT", + "dependencies": { + "bignumber.js": "^9.0.0", + "buffer": "^6.0.3", + "commander": "^2.15.0", + "ieee754": "^1.1.13", + "iso-url": "^1.1.5", + "json-text-sequence": "~0.3.0", + "readable-stream": "^3.6.0" + }, + "bin": { + "cbor2comment": "bin/cbor2comment.js", + "cbor2diag": "bin/cbor2diag.js", + "cbor2json": "bin/cbor2json.js", + "json2cbor": "bin/json2cbor.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/borc/node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/borc/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.25.4", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.25.4.tgz", + "integrity": "sha512-4jYpcjabC606xJ3kw2QwGEZKX0Aw7sgQdZCvIK9dhVSPh76BKo+C+btT1RRofH7B+8iNpEbgGNVWiLki5q93yg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "caniuse-lite": "^1.0.30001737", + "electron-to-chromium": "^1.5.211", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.3" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bs-logger": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-json-stable-stringify": "2.x" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/buffer-pipe": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/buffer-pipe/-/buffer-pipe-0.0.3.tgz", + "integrity": "sha512-GlxfuD/NrKvCNs0Ut+7b1IHjylfdegMBxQIlZHj7bObKVQBxB5S84gtm2yu1mQ8/sSggceWBDPY0cPXgvX2MuA==", + "license": "MPL-2.0", + "dependencies": { + "safe-buffer": "^5.1.2" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001739", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001739.tgz", + "integrity": "sha512-y+j60d6ulelrNSwpPyrHdl+9mJnQzHBr08xm48Qno0nSk4h3Qojh+ziv2qE6rXf4k3tadF4o1J/1tAbVm1NtnA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/cborg": { + "version": "4.2.15", + "resolved": "https://registry.npmjs.org/cborg/-/cborg-4.2.15.tgz", + "integrity": "sha512-T+YVPemWyXcBVQdp0k61lQp2hJniRNmul0lAwTj2DTS/6dI4eCq/MRMucGqqvFqMBfmnD8tJ9aFtPu5dEGAbgw==", + "license": "Apache-2.0", + "bin": { + "cborg": "lib/bin.js" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/chardet": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-2.1.0.tgz", + "integrity": "sha512-bNFETTG/pM5ryzQ9Ad0lJOTa6HWD/YsScAR3EnCPZRPlQh77JocYktSHOUHelyhm8IARL+o4c4F1bP5KVOjiRA==", + "dev": true, + "license": "MIT" + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-width": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz", + "integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 10" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", + "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/compare-versions": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/compare-versions/-/compare-versions-4.1.4.tgz", + "integrity": "sha512-FemMreK9xNyL8gQevsdRMrvO4lFCkQP7qbuktn1q8ndcNk1+0mz7lgE7b/sNvbhVgY4w6tMN1FDp6aADjqw2rw==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/concurrently": { + "version": "9.2.1", + "resolved": "https://registry.npmjs.org/concurrently/-/concurrently-9.2.1.tgz", + "integrity": "sha512-fsfrO0MxV64Znoy8/l1vVIjjHa29SZyyqPgQBwhiDcaW8wJc2W3XWVOGx4M3oJBnv/zdUZIIp1gDeS98GzP8Ng==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "4.1.2", + "rxjs": "7.8.2", + "shell-quote": "1.8.3", + "supports-color": "8.1.1", + "tree-kill": "1.2.2", + "yargs": "17.7.2" + }, + "bin": { + "conc": "dist/bin/concurrently.js", + "concurrently": "dist/bin/concurrently.js" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/open-cli-tools/concurrently?sponsor=1" + } + }, + "node_modules/concurrently/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/consola": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", + "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.18.0 || >=16.10.0" + } + }, + "node_modules/console.table": { + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/console.table/-/console.table-0.10.0.tgz", + "integrity": "sha512-dPyZofqggxuvSf7WXvNjuRfnsOk1YazkVP8FdxH4tcH2c37wc79/Yl6Bhr7Lsu00KMgy2ql/qCMuNu8xctZM8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "easy-table": "1.1.0" + }, + "engines": { + "node": "> 0.10" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/data-uri-to-buffer": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-6.0.2.tgz", + "integrity": "sha512-7hvf7/GW8e86rW0ptuwS3OcBGDjIi6SZva7hCyWC0yYry2cOPmLIjXAUHI6DK2HsnwJd9ifmt57i8eV2n4YNpw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/dedent": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.6.0.tgz", + "integrity": "sha512-F1Z+5UCFpmQUzJa11agbyPVMbpgT/qA3/SKyJ1jyBgm7dUcUEa8v9JwDkerSQXfakBwFljIxhOJqGkjUwZ9FSA==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/defaults": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "clone": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/degenerator": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/degenerator/-/degenerator-5.0.1.tgz", + "integrity": "sha512-TllpMR/t0M5sqCXfj85i4XaAzxmS5tVA16dqvdkMwGmzI+dXLXnw3J+3Vdv7VKw+ThlTMboK6i9rnZ6Nntj5CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ast-types": "^0.13.4", + "escodegen": "^2.1.0", + "esprima": "^4.0.1" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/easy-table": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/easy-table/-/easy-table-1.1.0.tgz", + "integrity": "sha512-oq33hWOSSnl2Hoh00tZWaIPi1ievrD9aFG82/IgjlycAnW9hHx5PkJiXpxPsgEE+H7BsbVQXFVFST8TEXS6/pA==", + "dev": true, + "license": "MIT", + "optionalDependencies": { + "wcwidth": ">=1.0.1" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.211", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.211.tgz", + "integrity": "sha512-IGBvimJkotaLzFnwIVgW9/UD/AOJ2tByUmeOrtqBfACSbAw5b1G0XpvdaieKyc7ULmbwXVx+4e4Be8pOPBrYkw==", + "dev": true, + "license": "ISC" + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/escodegen": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz", + "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esprima": "^4.0.1", + "estraverse": "^5.2.0", + "esutils": "^2.0.2" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=6.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ethers": { + "version": "6.13.2", + "resolved": "https://registry.npmjs.org/ethers/-/ethers-6.13.2.tgz", + "integrity": "sha512-9VkriTTed+/27BGuY1s0hf441kqwHJ1wtN2edksEtiRvXx+soxRX3iSXTfFqq2+YwrOqbDoTHjIhQnjJRlzKmg==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/ethers-io/" + }, + { + "type": "individual", + "url": "https://www.buymeacoffee.com/ricmoo" + } + ], + "license": "MIT", + "dependencies": { + "@adraffy/ens-normalize": "1.10.1", + "@noble/curves": "1.2.0", + "@noble/hashes": "1.3.2", + "@types/node": "18.15.13", + "aes-js": "4.0.0-beta.5", + "tslib": "2.4.0", + "ws": "8.17.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/ethers/node_modules/@types/node": { + "version": "18.15.13", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.13.tgz", + "integrity": "sha512-N+0kuo9KgrUQ1Sn/ifDXsvg0TTleP7rIy4zOBGECxAljqvqfqpTfzx0Q1NUedOixRMBfe2Whhb056a42cWs26Q==", + "license": "MIT" + }, + "node_modules/ethers/node_modules/tslib": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz", + "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==", + "license": "0BSD" + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/execa/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-safe-stringify": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz", + "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==", + "dev": true, + "license": "MIT" + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/fflate": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz", + "integrity": "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==", + "dev": true, + "license": "MIT" + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/file-type": { + "version": "21.0.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-21.0.0.tgz", + "integrity": "sha512-ek5xNX2YBYlXhiUXui3D/BXa3LdqPmoLJ7rqEx2bKJ7EAUEfmXgW0Das7Dc6Nr9MvqaOnIqiPV0mZk/r/UpNAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tokenizer/inflate": "^0.2.7", + "strtok3": "^10.2.2", + "token-types": "^6.0.0", + "uint8array-extras": "^1.4.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/form-data": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", + "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==", + "dev": true, + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs-extra": { + "version": "11.3.1", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.1.tgz", + "integrity": "sha512-eXvGGwZ5CL17ZSwHWd3bbgk7UUpF6IFHtP57NYYakPvHOs8GDgDe5KJI36jIJzDkJ6eJjuzRA8eBQb6SkKue0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-uri": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/get-uri/-/get-uri-6.0.5.tgz", + "integrity": "sha512-b1O07XYq8eRuVzBNgJLstU6FYc1tS6wnMtF1I1D9lE8LxZSOGZ7LhxN54yPP6mGw5f2CkXY2BQUL9Fx41qvcIg==", + "dev": true, + "license": "MIT", + "dependencies": { + "basic-ftp": "^5.0.2", + "data-uri-to-buffer": "^6.0.2", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/glob": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-11.0.3.tgz", + "integrity": "sha512-2Nim7dha1KVkaiF4q6Dj+ngPPMdfvLJEOpZk/jKiUAkqKebpGAWQXAq9z1xu9HKu5lWfqw/FASuccEjyznjPaA==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.3.1", + "jackspeak": "^4.1.1", + "minimatch": "^10.0.3", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^2.0.0" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/import-local": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/inquirer": { + "version": "8.2.7", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.7.tgz", + "integrity": "sha512-UjOaSel/iddGZJ5xP/Eixh6dY1XghiBw4XK13rCCIJcJfyhhoul/7KhLLUGtebEj6GDYM6Vnx/mVsjx2L/mFIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@inquirer/external-editor": "^1.0.0", + "ansi-escapes": "^4.2.1", + "chalk": "^4.1.1", + "cli-cursor": "^3.1.0", + "cli-width": "^3.0.0", + "figures": "^3.0.0", + "lodash": "^4.17.21", + "mute-stream": "0.0.8", + "ora": "^5.4.1", + "run-async": "^2.4.0", + "rxjs": "^7.5.5", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0", + "through": "^2.3.6", + "wrap-ansi": "^6.0.1" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/ip-address": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.0.1.tgz", + "integrity": "sha512-NWv9YLW4PoW2B7xtzaS3NCot75m6nK7Icdv0o3lfMceJVRfSoQwqD4wEH5rLwoKJwUiZ/rfpiVBhnaF0FK4HoA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/iso-url": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/iso-url/-/iso-url-1.2.1.tgz", + "integrity": "sha512-9JPDgCN4B7QPkLtYAAOrEuAWvP9rWvR5offAr0/SeF046wIkglqH3VXgYYP6NcsKslH80UIVgmPqNe3j7tG2ng==", + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/isomorphic-fetch": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-3.0.0.tgz", + "integrity": "sha512-qvUtwJ3j6qwsF3jLxkZ72qCgjMysPzDfeV240JHiGZsANBYd+EEuu35v7dfrJ9Up0Ak07D7GGSkGhCHTqg/5wA==", + "license": "MIT", + "dependencies": { + "node-fetch": "^2.6.1", + "whatwg-fetch": "^3.4.1" + } + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/iterare": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/iterare/-/iterare-1.2.1.tgz", + "integrity": "sha512-RKYVTCjAnRthyJes037NX/IiqeidgN1xc3j1RjFfECFp28A1GVwK9nA+i0rJPaHqSZwygLzRnFlzUuHFoWWy+Q==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=6" + } + }, + "node_modules/jackspeak": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-4.1.1.tgz", + "integrity": "sha512-zptv57P3GpL+O0I7VdMJNBZCu+BPHVQUk55Ft8/QCJjTVxrnJHuVuX/0Bl2A6/+2oyR/ZMEuFKwmzqqZ/U5nPQ==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-config/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/jest-config/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/jest-runtime/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-text-sequence": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/json-text-sequence/-/json-text-sequence-0.3.0.tgz", + "integrity": "sha512-7khKIYPKwXQem4lWXfpIN/FEnhztCeRPSxH4qm3fVlqulwujrRDD54xAwDDn/qVKpFtV550+QAkcWJcufzqQuA==", + "license": "MIT", + "dependencies": { + "@sovpro/delimited-stream": "^1.1.0" + }, + "engines": { + "node": ">=10.18.0" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/leb128": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/leb128/-/leb128-0.0.5.tgz", + "integrity": "sha512-elbNtfmu3GndZbesVF6+iQAfVjOXW9bM/aax9WwMlABZW+oK9sbAZEXoewaPHmL34sxa8kVwWsru8cNE/yn2gg==", + "license": "MPL-2.0", + "dependencies": { + "bn.js": "^5.0.0", + "buffer-pipe": "0.0.3" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/load-esm": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/load-esm/-/load-esm-1.0.2.tgz", + "integrity": "sha512-nVAvWk/jeyrWyXEAs84mpQCYccxRqgKY4OznLuJhJCa0XsPSfdOIr2zvBZEj3IHEHbX97jjscKRRV539bW0Gpw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + }, + { + "type": "buymeacoffee", + "url": "https://buymeacoffee.com/borewit" + } + ], + "license": "MIT", + "engines": { + "node": ">=13.2.0" + } + }, + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true, + "license": "ISC" + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "10.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.0.3.tgz", + "integrity": "sha512-IPZ167aShDZZUMdRk66cyQAW3qr0WzbHkPdMYa8bzZhlHhO3jALbKdxcaak7W9FfT2rZNpQuUu4Od7ILEpXSaw==", + "dev": true, + "license": "ISC", + "dependencies": { + "@isaacs/brace-expansion": "^5.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/multiformats": { + "version": "13.4.0", + "resolved": "https://registry.npmjs.org/multiformats/-/multiformats-13.4.0.tgz", + "integrity": "sha512-Mkb/QcclrJxKC+vrcIFl297h52QcKh2Az/9A5vbWytbQt4225UWWWmIuSsKksdww9NkIeYcA7DkfftyLuC/JSg==", + "license": "Apache-2.0 OR MIT" + }, + "node_modules/mute-stream": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", + "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", + "dev": true, + "license": "ISC" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true, + "license": "MIT" + }, + "node_modules/netmask": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/netmask/-/netmask-2.0.2.tgz", + "integrity": "sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.19", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", + "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", + "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/pac-proxy-agent": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.2.0.tgz", + "integrity": "sha512-TEB8ESquiLMc0lV8vcd5Ql/JAKAoyzHFXaStwjkzpOpC5Yv+pIzLfHvjTSdf3vpa2bMiUQrg9i6276yn8666aA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/quickjs-emscripten": "^0.23.0", + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "get-uri": "^6.0.1", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.6", + "pac-resolver": "^7.0.1", + "socks-proxy-agent": "^8.0.5" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-resolver": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/pac-resolver/-/pac-resolver-7.0.1.tgz", + "integrity": "sha512-5NPgf87AT2STgwa2ntRMr45jTKrYBGkVU36yT0ig/n/GMAa3oPqhZfIQ2kMEimReg0+t9kZViDVZ83qfVUlckg==", + "dev": true, + "license": "MIT", + "dependencies": { + "degenerator": "^5.0.0", + "netmask": "^2.0.2" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-scurry": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.0.tgz", + "integrity": "sha512-ypGJsmGtdXUOeM5u93TyeIEfEhM6s+ljAhrk5vAvSx8uyY/02OvrZnA0YNGUrPXfpJMgI1ODd3nwz8Npx4O4cg==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^11.0.0", + "minipass": "^7.1.2" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.1.0.tgz", + "integrity": "sha512-QIXZUBJUx+2zHUdQujWejBkcD9+cs94tLn0+YL8UrCh+D5sCXZ4c7LaEH48pNwRY3MLDgqUFyhlCyjJPf1WP0A==", + "dev": true, + "license": "ISC", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/path-to-regexp": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.2.0.tgz", + "integrity": "sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/proxy-agent": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.5.0.tgz", + "integrity": "sha512-TmatMXdr2KlRiA2CyDu8GqR8EjahTG3aY3nXjdzFyoZbmB8hrBsTyMezhULIXKnC0jpfjlmiZ3+EaCzoInSu/A==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "http-proxy-agent": "^7.0.1", + "https-proxy-agent": "^7.0.6", + "lru-cache": "^7.14.1", + "pac-proxy-agent": "^7.1.0", + "proxy-from-env": "^1.1.0", + "socks-proxy-agent": "^8.0.5" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/proxy-agent/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "dev": true, + "license": "MIT" + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "license": "MIT" + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/reflect-metadata": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.2.2.tgz", + "integrity": "sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.10", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", + "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/restore-cursor/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/run-async": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", + "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/rxjs": { + "version": "7.8.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.3.tgz", + "integrity": "sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/smart-buffer": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks": { + "version": "2.8.7", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.7.tgz", + "integrity": "sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ip-address": "^10.0.1", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks-proxy-agent": { + "version": "8.0.5", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", + "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "socks": "^2.8.3" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stack-utils/node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strtok3": { + "version": "10.3.4", + "resolved": "https://registry.npmjs.org/strtok3/-/strtok3-10.3.4.tgz", + "integrity": "sha512-KIy5nylvC5le1OdaaoCJ07L+8iQzJHGH6pWDuzS+d07Cu7n1MZ2x26P8ZKIWfbK02+XIL8Mp4RkWeqdUCrDMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tokenizer/token": "^0.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/sync-multihash-sha2": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/sync-multihash-sha2/-/sync-multihash-sha2-1.0.0.tgz", + "integrity": "sha512-A5gVpmtKF0ov+/XID0M0QRJqF2QxAsj3x/LlDC8yivzgoYCoWkV+XaZPfVu7Vj1T/hYzYS1tfjwboSbXjqocug==", + "license": "(Apache-2.0 AND MIT)", + "dependencies": { + "@noble/hashes": "^1.3.1" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/test-exclude/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/test-exclude/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/token-types": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/token-types/-/token-types-6.1.1.tgz", + "integrity": "sha512-kh9LVIWH5CnL63Ipf0jhlBIy0UsrMj/NJDfpsy1SqOXlLKEVyXXYrnFxFT1yOOYVGBSApeVnjPw/sBz5BfEjAQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@borewit/text-codec": "^0.1.0", + "@tokenizer/token": "^0.3.0", + "ieee754": "^1.2.1" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "license": "MIT" + }, + "node_modules/tree-kill": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz", + "integrity": "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==", + "dev": true, + "license": "MIT", + "bin": { + "tree-kill": "cli.js" + } + }, + "node_modules/ts-jest": { + "version": "29.4.1", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.1.tgz", + "integrity": "sha512-SaeUtjfpg9Uqu8IbeDKtdaS0g8lS6FT6OzM3ezrDfErPJPHNDo/Ey+VFGP1bQIDfagYDLyRpd7O15XpG1Es2Uw==", + "dev": true, + "license": "MIT", + "dependencies": { + "bs-logger": "^0.2.6", + "fast-json-stable-stringify": "^2.1.0", + "handlebars": "^4.7.8", + "json5": "^2.2.3", + "lodash.memoize": "^4.1.2", + "make-error": "^1.3.6", + "semver": "^7.7.2", + "type-fest": "^4.41.0", + "yargs-parser": "^21.1.1" + }, + "bin": { + "ts-jest": "cli.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/transform": "^29.0.0 || ^30.0.0", + "@jest/types": "^29.0.0 || ^30.0.0", + "babel-jest": "^29.0.0 || ^30.0.0", + "jest": "^29.0.0 || ^30.0.0", + "jest-util": "^29.0.0 || ^30.0.0", + "typescript": ">=4.3 <6" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "@jest/transform": { + "optional": true + }, + "@jest/types": { + "optional": true + }, + "babel-jest": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jest-util": { + "optional": true + } + } + }, + "node_modules/ts-jest/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ts-jest/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD" + }, + "node_modules/tweetnacl": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-1.0.3.tgz", + "integrity": "sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==", + "license": "Unlicense" + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.2.tgz", + "integrity": "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/uid": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/uid/-/uid-2.0.2.tgz", + "integrity": "sha512-u3xV3X7uzvi5b1MncmZo3i2Aw222Zk1keqLA1YkHldREkAhAqi65wuPfe7lHx8H/Wzy+8CE7S7uS3jekIM5s8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@lukeed/csprng": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/uint8array-extras": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/uint8array-extras/-/uint8array-extras-1.5.0.tgz", + "integrity": "sha512-rvKSBiC5zqCCiDZ9kAOszZcDvdAHwwIKJG33Ykj43OKcWsnmcBRL09YTU4nOeHZ8Y2a7l1MgTd08SBe9A8Qj6A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/uint8arrays": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/uint8arrays/-/uint8arrays-3.1.0.tgz", + "integrity": "sha512-ei5rfKtoRO8OyOIor2Rz5fhzjThwIHJZ3uyDPnDHTXbP0aMQ1RN/6AI5B5d9dBxJOU+BvOAk7ZQ1xphsX8Lrog==", + "license": "MIT", + "dependencies": { + "multiformats": "^9.4.2" + } + }, + "node_modules/uint8arrays/node_modules/multiformats": { + "version": "9.9.0", + "resolved": "https://registry.npmjs.org/multiformats/-/multiformats-9.9.0.tgz", + "integrity": "sha512-HoMUjhH9T8DDBNT+6xzkrd9ga/XiBI4xLr58LJACwK6G3HTOPeMz4nB4KJs33L2BelrIJa7P0VuNaVF3hMYfjg==", + "license": "(Apache-2.0 AND MIT)" + }, + "node_modules/ulid": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ulid/-/ulid-3.0.1.tgz", + "integrity": "sha512-dPJyqPzx8preQhqq24bBG1YNkvigm87K8kVEHCD+ruZg24t6IFEFv00xMWfxcC4djmFtiTLdFuADn4+DOz6R7Q==", + "license": "MIT", + "bin": { + "ulid": "dist/cli.js" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", + "dev": true, + "license": "MIT", + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "license": "BSD-2-Clause" + }, + "node_modules/whatwg-fetch": { + "version": "3.6.20", + "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz", + "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==", + "license": "MIT" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/write-file-atomic/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ws": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/market/mk20/tsclient/package.json b/market/mk20/tsclient/package.json new file mode 100644 index 000000000..6a196cc52 --- /dev/null +++ b/market/mk20/tsclient/package.json @@ -0,0 +1,47 @@ +{ + "name": "@curiostorage/market-client", + "version": "0.4.2", + "description": "TypeScript API client for Curio storage market. You probably want the Synapse SDK instead.", + "main": "dist/src/index.js", + "types": "dist/src/index.d.ts", + "scripts": { + "build": "npm run generate && npm run compile", + "generate": "openapi-generator-cli generate -i ../http/swagger.json -g typescript-fetch -o ./generated --additional-properties=supportsES6=true,typescriptThreePlus=true --skip-validate-spec", + "compile": "tsc", + "clean": "rm -rf dist generated", + "test": "jest", + "test:watch": "jest --watch", + "test:coverage": "jest --coverage", + "prepublishOnly": "npm run build" + }, + "keywords": [ + "curio", + "filecoin", + "storage", + "market", + "api", + "typescript" + ], + "author": "Curio Team", + "license": "MIT", + "devDependencies": { + "@openapitools/openapi-generator-cli": "^2.7.0", + "@types/jest": "^29.0.0", + "@types/node": "^20.0.0", + "jest": "^29.0.0", + "ts-jest": "^29.0.0", + "typescript": "^5.0.0" + }, + "dependencies": { + "@glif/filecoin-address": "^4.0.0", + "@noble/secp256k1": "^2.1.0", + "@web3-storage/data-segment": "^5.3.0", + "isomorphic-fetch": "^3.0.0", + "multiformats": "^13.4.0", + "tweetnacl": "^1.0.3", + "ulid": "^3.0.1" + }, + "engines": { + "node": ">=18.0.0" + } +} diff --git a/market/mk20/tsclient/scripts/build.sh b/market/mk20/tsclient/scripts/build.sh new file mode 100755 index 000000000..fc6f06b2b --- /dev/null +++ b/market/mk20/tsclient/scripts/build.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +set -e + +echo "🚀 Building Curio TypeScript Market Client..." + +# Check if Node.js is installed +if ! command -v node &> /dev/null; then + echo "❌ Node.js is not installed. Please install Node.js 18+ first." + exit 1 +fi + +# Check Node.js version +NODE_VERSION=$(node -v | cut -d'v' -f2 | cut -d'.' -f1) +if [ "$NODE_VERSION" -lt 18 ]; then + echo "❌ Node.js version 18+ is required. Current version: $(node -v)" + exit 1 +fi + +echo "✅ Node.js version: $(node -v)" + +# Check if npm is installed +if ! command -v npm &> /dev/null; then + echo "❌ npm is not installed. Please install npm first." + exit 1 +fi + +echo "✅ npm version: $(npm -v)" + +# Clean previous builds +echo "🧹 Cleaning previous builds..." +npm run clean + +# Install dependencies +echo "📦 Installing dependencies..." +npm install + +# Generate client from swagger +echo "🔧 Generating TypeScript client from swagger files..." +npm run generate + +# Compile TypeScript +echo "⚙️ Compiling TypeScript..." +npm run compile + +echo "✅ Build completed successfully!" +echo "" +echo "📁 Generated files:" +echo " - Generated client: ./generated/" +echo " - Compiled output: ./dist/" +echo " - Type definitions: ./dist/index.d.ts" +echo "" +echo "🚀 You can now use the client:" +echo " import { Client } from '@curio/market-client';" +echo "" +echo "📚 See examples/ for usage examples" diff --git a/market/mk20/tsclient/src/auth.ts b/market/mk20/tsclient/src/auth.ts new file mode 100644 index 000000000..295c92ec0 --- /dev/null +++ b/market/mk20/tsclient/src/auth.ts @@ -0,0 +1,425 @@ +// Lazy import to avoid hard dependency during build environments without install +let nacl: any; +async function getNacl(): Promise { + if (!nacl) { + // eslint-disable-next-line @typescript-eslint/no-var-requires + nacl = require('tweetnacl'); + } + return nacl; +} + +let nobleSecp: any; +async function getSecp(): Promise { + if (!nobleSecp) { + // eslint-disable-next-line @typescript-eslint/no-var-requires + nobleSecp = require('@noble/secp256k1'); + // Provide sync HMAC-SHA256 for RFC6979 (required by noble's sign) + try { + if (!nobleSecp.etc.hmacSha256Sync) { + // eslint-disable-next-line @typescript-eslint/no-var-requires + const nodeCrypto = require('crypto'); + const concat = (...arrs: Uint8Array[]) => { + const total = arrs.reduce((s, a) => s + a.length, 0); + const out = new Uint8Array(total); + let off = 0; + for (const a of arrs) { out.set(a, off); off += a.length; } + return out; + }; + nobleSecp.etc.hmacSha256Sync = (key: Uint8Array, ...msgs: Uint8Array[]) => { + const h = nodeCrypto.createHmac('sha256', Buffer.from(key)); + const all = concat(...msgs); + h.update(Buffer.from(all)); + return new Uint8Array(h.digest()); + }; + } + } catch (_) { + // leave as-is; if not set, noble will throw, which surfaces clearly + } + } + return nobleSecp; +} + +/** + * Utilities to construct Curio Market 2.0 Authorization headers. + * + * Header format: + * Authorization: "CurioAuth ::" + * + * - For ed25519: + * - pubKeyBase64: base64 of 32-byte raw public key + * - signatureBase64: base64 of detached ed25519 signature over sha256(pubKey || RFC3339Hour) + * - For secp256k1 / bls / delegated: not implemented here (Filecoin signature envelope required) + */ +export class AuthUtils { + /** Signer interface for pluggable key types. */ + static readonly KEYTYPE_ED25519 = 'ed25519' as const; + + /** + * Build Authorization header from a provided signer and key type. + * Currently supports 'ed25519'. + */ + static async buildAuthHeader( + signer: AuthSigner, + keyType: 'ed25519' | 'secp256k1', + now?: Date, + ): Promise { + switch (keyType) { + case 'ed25519': + return this.buildEd25519AuthHeader(await signer.getPublicKey(), await signer.sign.bind(signer), now); + case 'secp256k1': { + const addrBytes = await signer.getPublicKey(); + const ts = this.rfc3339TruncatedToHour(now); + const msg = await this.sha256Concat(addrBytes, new TextEncoder().encode(ts)); + const sigEnvelope = await signer.sign(msg); // expected to be Filecoin signature envelope bytes + const pubB64 = this.toBase64(addrBytes); + const sigB64 = this.toBase64(sigEnvelope); + return `CurioAuth secp256k1:${pubB64}:${sigB64}`; + } + default: + throw new Error(`Unsupported key type: ${keyType}`); + } + } + + /** + * Build Authorization header for ed25519 keys. + * @param publicKeyRaw - 32-byte ed25519 public key (raw) + * @param privateKeyOrSign - ed25519 private key bytes (64 secretKey or 32 seed), + * OR a sign function (message)=>signature + * @param now - Optional date used for timestamp; defaults to current time + * @returns Authorization header value (without the "Authorization: " prefix) + */ + static async buildEd25519AuthHeader( + publicKeyRaw: Uint8Array, + privateKeyOrSign: Uint8Array | ((message: Uint8Array) => Promise | Uint8Array), + now?: Date, + ): Promise { + if (publicKeyRaw.length !== 32) { + throw new Error(`ed25519 publicKey must be 32 bytes, got ${publicKeyRaw.length}`); + } + + const ts = this.rfc3339TruncatedToHour(now); + const message = await this.sha256Concat(publicKeyRaw, new TextEncoder().encode(ts)); + let signature: Uint8Array; + if (typeof privateKeyOrSign === 'function') { + signature = await privateKeyOrSign(message); + } else { + const secretKey = this.ensureEd25519SecretKey(privateKeyOrSign); + const n = await getNacl(); + signature = n.sign.detached(message, secretKey); + } + + const pubB64 = this.toBase64(publicKeyRaw); + const sigB64 = this.toBase64(signature); + return `CurioAuth ed25519:${pubB64}:${sigB64}`; + } + + /** Return headers object with Authorization set for ed25519. */ + static async makeAuthHeadersEd25519( + publicKeyRaw: Uint8Array, + privateKey: Uint8Array, + now?: Date, + ): Promise> { + const value = await this.buildEd25519AuthHeader(publicKeyRaw, privateKey, now); + return { Authorization: value }; + } + + /** Convert a 32-byte seed or 64-byte secretKey into a 64-byte secretKey. */ + private static ensureEd25519SecretKey(privateKey: Uint8Array): Uint8Array { + if (privateKey.length === 64) { + return privateKey; + } + if (privateKey.length === 32) { + const n = require('tweetnacl'); + const kp = n.sign.keyPair.fromSeed(privateKey); + return kp.secretKey; + } + throw new Error(`ed25519 private key must be 32-byte seed or 64-byte secretKey, got ${privateKey.length}`); + } + + /** RFC3339 timestamp truncated to the hour, always UTC, e.g., 2025-07-15T17:00:00Z */ + static rfc3339TruncatedToHour(date?: Date): string { + const d = date ? new Date(date) : new Date(); + const y = d.getUTCFullYear(); + const m = (d.getUTCMonth() + 1).toString().padStart(2, '0'); + const day = d.getUTCDate().toString().padStart(2, '0'); + const h = d.getUTCHours().toString().padStart(2, '0'); + return `${y}-${m}-${day}T${h}:00:00Z`; + } + + /** Compute sha256 over concatenation of two byte arrays. */ + private static async sha256Concat(a: Uint8Array, b: Uint8Array): Promise { + const combined = new Uint8Array(a.length + b.length); + combined.set(a, 0); + combined.set(b, a.length); + // Prefer WebCrypto when available + if (typeof globalThis !== 'undefined' && (globalThis as any).crypto?.subtle) { + const hashBuf = await (globalThis as any).crypto.subtle.digest('SHA-256', combined); + return new Uint8Array(hashBuf); + } + // Fallback to Node crypto + try { + const nodeCrypto = await import('crypto'); + const hasher = nodeCrypto.createHash('sha256'); + hasher.update(Buffer.from(combined)); + return new Uint8Array(hasher.digest()); + } catch { + throw new Error('No available crypto implementation to compute SHA-256 digest'); + } + } + + /** Base64 encode Uint8Array across environments. */ + private static toBase64(bytes: Uint8Array): string { + if (typeof Buffer !== 'undefined') { + // Node + return Buffer.from(bytes).toString('base64'); + } + // Browser + let binary = ''; + for (let i = 0; i < bytes.length; i++) binary += String.fromCharCode(bytes[i]); + return btoa(binary); + } + + /** Compute BLAKE2b-256 digest (32 bytes). */ + static async blake2b256(data: Uint8Array): Promise { + try { + // eslint-disable-next-line @typescript-eslint/no-var-requires + const nodeCrypto = require('crypto'); + try { + const h = nodeCrypto.createHash('blake2b512', { outputLength: 32 }); + h.update(Buffer.from(data)); + return new Uint8Array(h.digest()); + } catch (_) { + // fall back to blakejs + } + } catch (_) {} + try { + // eslint-disable-next-line @typescript-eslint/no-var-requires + const blake = require('blakejs'); + const out = blake.blake2b(data, undefined, 32); + return new Uint8Array(out); + } catch (_) { + throw new Error('No available BLAKE2b-256 implementation'); + } + } +} + +export default AuthUtils; + +// Configuration interface for authentication +export interface AuthConfig { + serverUrl: string; + clientAddr: string; + recordKeeper: string; + contractAddress: string; + keyType: 'ed25519' | 'secp256k1'; + publicKeyB64?: string; + privateKeyB64?: string; + secpPrivateKeyHex?: string; + secpPrivateKeyB64?: string; +} + +/** Generic signer interface */ +export interface AuthSigner { + getPublicKey(): Promise | Uint8Array; + sign(message: Uint8Array): Promise | Uint8Array; +} + +/** Ed25519 signer that takes public and private key material at construction */ +export class Ed25519KeypairSigner implements AuthSigner { + private readonly publicKeyRaw: Uint8Array; + private readonly secretKey: Uint8Array; + + constructor(publicKeyRaw: Uint8Array, privateKey: Uint8Array) { + if (publicKeyRaw.length !== 32) { + throw new Error(`ed25519 publicKey must be 32 bytes, got ${publicKeyRaw.length}`); + } + this.publicKeyRaw = publicKeyRaw; + this.secretKey = AuthUtils['ensureEd25519SecretKey'](privateKey); + } + + getPublicKey(): Uint8Array { + return this.publicKeyRaw; + } + + async sign(message: Uint8Array): Promise { + const n = await getNacl(); + return n.sign.detached(message, this.secretKey); + } +} + +/** Secp256k1 signer using a Filecoin address and secp256k1 private key. */ +export class Secp256k1AddressSigner implements AuthSigner { + private readonly addressBytes: Uint8Array; + private readonly privateKey: Uint8Array; + + /** + * @param addressString - Filecoin address string (f1/t1) + * @param privateKey - 32-byte secp256k1 private key (Uint8Array) + */ + constructor(addressString: string, privateKey: Uint8Array) { + this.addressBytes = Secp256k1AddressSigner.addressBytesFromString(addressString); + if (privateKey.length !== 32) { + throw new Error(`secp256k1 private key must be 32 bytes, got ${privateKey.length}`); + } + this.privateKey = privateKey; + } + + getPublicKey(): Uint8Array { + // For secp256k1 CurioAuth, the "public key" field is the Filecoin address bytes + return this.addressBytes; + } + + /** + * Produce Filecoin signature envelope bytes: [SigType=0x01] || [65-byte secp256k1 signature (R||S||V)] + */ + async sign(message: Uint8Array): Promise { + const secp = await getSecp(); + const digest = await AuthUtils.blake2b256(message); + const sigObj = secp.sign(digest, this.privateKey); // returns Signature with recovery + const sig = typeof sigObj.toCompactRawBytes === 'function' ? sigObj.toCompactRawBytes() : sigObj.toBytes(); + const recid = sigObj.recovery ?? 0; + if (!(sig instanceof Uint8Array) || sig.length !== 64) throw new Error('unexpected secp256k1 signature size'); + const data = new Uint8Array(1 + 65); + data[0] = 0x01; // fcrypto.SigTypeSecp256k1 + data.set(sig, 1); + data[1 + 64] = recid & 0xff; + return data; + } + + /** Parse Filecoin f1/t1 address string to address bytes: [protocol (1)] || payload (20). */ + static addressBytesFromString(address: string): Uint8Array { + if (!address || address.length < 3) throw new Error('invalid address'); + const net = address[0]; + if (net !== 'f' && net !== 't') throw new Error('invalid network prefix'); + const protoCh = address[1]; + if (protoCh !== '1') throw new Error('unsupported protocol: only secp256k1 (1) supported'); + const b32 = address.slice(2).toLowerCase(); + const decoded = Secp256k1AddressSigner.base32Decode(b32); + if (decoded.length < 4 + 20) throw new Error('invalid address payload'); + const payload = decoded.slice(0, decoded.length - 4); // drop checksum (last 4 bytes) + if (payload.length !== 20) throw new Error('invalid secp256k1 payload length'); + const out = new Uint8Array(1 + payload.length); + out[0] = 0x01; // protocol 1 + out.set(payload, 1); + return out; + } + + /** Base32 decode with alphabet 'abcdefghijklmnopqrstuvwxyz234567'. */ + private static base32Decode(s: string): Uint8Array { + const alphabet = 'abcdefghijklmnopqrstuvwxyz234567'; + const map: Record = {}; + for (let i = 0; i < alphabet.length; i++) map[alphabet[i]] = i; + let bits = 0; + let value = 0; + const out: number[] = []; + for (let i = 0; i < s.length; i++) { + const ch = s[i]; + if (ch === '=') break; + const v = map[ch]; + if (v === undefined) throw new Error('invalid base32 character'); + value = (value << 5) | v; + bits += 5; + if (bits >= 8) { + out.push((value >> (bits - 8)) & 0xff); + bits -= 8; + value &= (1 << bits) - 1; + } + } + return new Uint8Array(out); + } +} + +// Utility functions for authentication and client management + +/** + * Build authentication header from configuration + */ +export async function buildAuthHeader(config: AuthConfig): Promise { + if (config.keyType === 'ed25519') { + if (!config.publicKeyB64 || !config.privateKeyB64) { + throw new Error('PDP_PUBLIC_KEY_B64 and PDP_PRIVATE_KEY_B64 must be set for ed25519'); + } + const pub = Uint8Array.from(Buffer.from(config.publicKeyB64, 'base64')); + const priv = Uint8Array.from(Buffer.from(config.privateKeyB64, 'base64')); + const signer = new Ed25519KeypairSigner(pub, priv); + return await AuthUtils.buildAuthHeader(signer, 'ed25519'); + } else if (config.keyType === 'secp256k1') { + // Derive pubKeyBase64 from Filecoin address bytes + const addrBytes = Secp256k1AddressSigner.addressBytesFromString(config.clientAddr); + const pubB64 = Buffer.from(addrBytes).toString('base64'); + if (!pubB64) throw new Error('Unable to derive address bytes from PDP_CLIENT'); + + // Load secp256k1 private key from env (HEX preferred, else B64) + let priv: Uint8Array | undefined; + if (config.secpPrivateKeyHex) { + const clean = config.secpPrivateKeyHex.startsWith('0x') ? config.secpPrivateKeyHex.slice(2) : config.secpPrivateKeyHex; + if (clean.length !== 64) throw new Error('PDP_SECP_PRIVATE_KEY_HEX must be 32-byte (64 hex chars)'); + const bytes = new Uint8Array(32); + for (let i = 0; i < 32; i++) bytes[i] = parseInt(clean.substr(i * 2, 2), 16); + priv = bytes; + } else if (config.secpPrivateKeyB64) { + const buf = Buffer.from(config.secpPrivateKeyB64, 'base64'); + if (buf.length !== 32) throw new Error('PDP_SECP_PRIVATE_KEY_B64 must decode to 32 bytes'); + priv = new Uint8Array(buf); + } + if (!priv) throw new Error('Set PDP_SECP_PRIVATE_KEY_HEX or PDP_SECP_PRIVATE_KEY_B64 for secp256k1 signing'); + + // Use Secp256k1AddressSigner (address bytes derived from PDP_CLIENT) + const signer = new Secp256k1AddressSigner(config.clientAddr, priv); + return await AuthUtils.buildAuthHeader(signer, 'secp256k1'); + } else { + throw new Error(`Unsupported PDP_KEY_TYPE: ${config.keyType}`); + } +} + +/** + * Create authenticated client from configuration and auth header + */ +export function createClient(config: AuthConfig, authHeader: string): any { + const clientConfig = { + serverUrl: config.serverUrl, + headers: { Authorization: authHeader }, + }; + // Use the same pattern as the original unpkg-end-to-end.ts file + return new (require('./client').MarketClient)(clientConfig); +} + +/** + * Sanitize auth header for logging (removes sensitive signature data) + */ +export function sanitizeAuthHeader(authHeader: string): string { + return authHeader.replace(/:[A-Za-z0-9+/=]{16,}:/, (m) => `:${m.slice(1, 9)}...:`); +} + +/** + * Run preflight connectivity checks + */ +export async function runPreflightChecks(config: AuthConfig, authHeader: string): Promise { + try { + const base = config.serverUrl.replace(/\/$/, ''); + const urls: Array<{ url: string; headers?: Record }> = [ + { url: `${base}/health` }, + { url: `${base}/market/mk20/info/swagger.json` }, + { url: `${base}/market/mk20/products`, headers: { Authorization: authHeader } }, + ]; + + for (const { url, headers } of urls) { + try { + const init: RequestInit = headers ? { headers } : {}; + const r = await fetch(url, init); + console.log(`Preflight ${url}:`, r.status); + if (!r.ok) { + const text = await r.text().catch(() => ''); + console.log(`Preflight body (${url}):`, text); + } + } catch (e) { + const err = e as any; + console.error(`Preflight failed (${url}):`, err?.message || String(e), err?.cause?.code || '', err?.code || ''); + } + } + } catch (e) { + console.error('Preflight orchestrator failed:', (e as Error).message); + } +} + + diff --git a/market/mk20/tsclient/src/client.ts b/market/mk20/tsclient/src/client.ts new file mode 100644 index 000000000..08b7f5797 --- /dev/null +++ b/market/mk20/tsclient/src/client.ts @@ -0,0 +1,557 @@ +import { DefaultApi, ConfigurationParameters, Mk20Deal, Mk20DealProductStatusResponse, Mk20SupportedContracts, Mk20SupportedProducts, Mk20SupportedDataSources, Mk20Products, Mk20PDPV1, Mk20RetrievalV1, Mk20DDOV1, Mk20DataSource, Mk20DealState } from '../generated'; +import { monotonicFactory } from 'ulid'; +import { Configuration } from '../generated/runtime'; +import { Mk20StartUpload } from '../generated/models/Mk20StartUpload'; +import { StreamingPDP } from './streaming'; +import { calculate as calculatePieceCID } from './piece'; + +const ulid = monotonicFactory(() => Math.random()); +export interface MarketClientConfig extends Omit { + serverUrl: string; // e.g. http://localhost:8080 +} + +/** + * Utility class for computing Filecoin piece CID v2 from blobs + * Uses the better implementation from piece.ts + */ +export class PieceCidUtils { + /** + * Compute piece CID v2 from an array of blobs + * @param blobs - Array of Blob objects + * @returns Promise - Piece CID v2 as a string + */ + static async computePieceCidV2(blobs: Blob[]): Promise { + try { + // Concatenate all blob data + const totalSize = blobs.reduce((sum, blob) => sum + blob.size, 0); + const concatenatedData = new Uint8Array(totalSize); + let offset = 0; + + for (const blob of blobs) { + const arrayBuffer = await blob.arrayBuffer(); + const uint8Array = new Uint8Array(arrayBuffer); + concatenatedData.set(uint8Array, offset); + offset += uint8Array.length; + } + + // Use the better piece.ts implementation + const pieceCID = calculatePieceCID(concatenatedData); + return pieceCID.toString(); + } catch (error) { + throw new Error(`Failed to compute piece CID v2: ${error}`); + } + } +} + +export class MarketClient { + private api: DefaultApi; + private config: MarketClientConfig; + + /** + * Try to extract a human-friendly error string from an HTTP Response. + */ + private async formatHttpError(prefix: string, resp: Response): Promise { + const status = resp.status; + const statusText = resp.statusText || ''; + const h = resp.headers; + const reasonHeader = h.get('Reason') || h.get('reason') || h.get('X-Reason') || h.get('x-reason') || h.get('X-Error') || h.get('x-error') || ''; + let body = ''; + try { + // clone() to avoid consuming the body in case other handlers need it + body = await resp.clone().text(); + } catch {} + const details = [reasonHeader?.trim(), body?.trim()].filter(Boolean).join(' | '); + const statusPart = statusText ? `${status} ${statusText}` : String(status); + return `${prefix} (HTTP ${statusPart})${details ? `: ${details}` : ''}`; + } + + /** + * Create a MarketClient instance. + * @param config - Configuration object + * @param config.serverUrl - Base server URL, e.g. http://localhost:8080 + * @param config.headers - Optional default headers to send with every request + * @param config.fetchApi - Optional custom fetch implementation + */ + constructor(config: MarketClientConfig) { + this.config = config; + const basePath = `${config.serverUrl.replace(/\/$/, '')}/market/mk20`; + const runtimeConfig = { ...config, basePath } as ConfigurationParameters; + this.api = new DefaultApi(new Configuration(runtimeConfig)); + } + + /** + * Factory: create a StreamingPDP helper bound to this client instance + */ + /** + * Create a StreamingPDP helper bound to this client instance. + * @param params - Streaming parameters + * @param params.client - Client wallet address + * @param params.provider - Provider wallet address + * @param params.contractAddress - Verification contract address + * @param params.chunkSize - Optional chunk size in bytes (default 1MB) + */ + streamingPDP(params: { client: string; provider: string; contractAddress: string; chunkSize?: number }): StreamingPDP { + return new StreamingPDP(this, params); + } + + /** + * Convert a ULID string (26-char Crockford base32) into an ASCII byte array + */ + private ulidToBytes(ulidString: string): number[] { + // ULID is 26 characters, convert to ASCII byte array + const bytes: number[] = []; + for (let i = 0; i < ulidString.length; i++) { + bytes.push(ulidString.charCodeAt(i)); + } + return bytes; + } + + /** + * Get supported contracts + */ + async getContracts(): Promise { + try { + const response = await this.api.contractsGet(); + return response.contracts || []; + } catch (error) { + throw new Error(`Failed to get contracts: ${error}`); + } + } + + /** + * Get supported products + */ + async getProducts(): Promise { + try { + const response = await this.api.productsGet(); + return response; + } catch (error) { + throw new Error(`Failed to get products: ${error}`); + } + } + + /** + * Get supported data sources + */ + async getSources(): Promise { + try { + const response = await this.api.sourcesGet(); + return response; + } catch (error) { + throw new Error(`Failed to get sources: ${error}`); + } + } + + /** + * Get deal status by ID + */ + /** + * Get deal status by ID. + * @param id - Deal identifier (string ULID returned from submit wrappers) + */ + async getStatus(id: string): Promise { + try { + const response = await this.api.statusIdGet({ id }); + return response; + } catch (error) { + throw new Error(`Failed to get deal status for ${id}: ${error}`); + } + } + + /** + * Submit a new deal + */ + /** + * Submit a new deal. + * @param deal - Deal payload matching Mk20Deal schema + */ + async submitDeal(deal: Mk20Deal): Promise { + try { + // Use Raw call so we can inspect/handle non-JSON responses gracefully + const apiResp = await this.api.storePostRaw({ body: deal }); + const ct = apiResp.raw.headers.get('content-type') || ''; + if (ct.toLowerCase().includes('application/json') || ct.toLowerCase().includes('+json')) { + return await apiResp.value(); + } + // Treat non-JSON 2xx as success; return HTTP status code + return apiResp.raw.status; + } catch (error: any) { + // If this is a ResponseError, try to extract HTTP status and body text + const resp = error?.response as Response | undefined; + if (resp) { + const msg = await this.formatHttpError('Failed to submit deal', resp); + throw new Error(msg); + } + // Fallback + throw new Error(`Failed to submit deal: ${error?.message || String(error)}`); + } + } + + + /** + * Calculate piece ID for an individual blob based on its content + * @param blob - The blob to calculate piece ID for + * @returns Promise - A unique piece ID for this blob + */ + private async calculateBlobPieceId(blob: Blob): Promise { + // Create a hash from the blob's content to generate a unique piece ID + const arrayBuffer = await blob.arrayBuffer(); + const uint8Array = new Uint8Array(arrayBuffer); + + let hash = 0; + for (let i = 0; i < uint8Array.length; i++) { + hash = ((hash << 5) - hash) + uint8Array[i]; + hash = hash & hash; // Convert to 32-bit integer + } + + // Add size to the hash to make it more unique + hash = ((hash << 5) - hash) + blob.size; + hash = hash & hash; + + // Ensure positive and within reasonable bounds + return Math.abs(hash) % 1000000; // Keep within 6 digits + } + + async waitDealComplete(id: string): Promise { + var duration = 0; + const step = 10000; + while (true) { + const resp = await this.getStatus(id); + if (resp?.pdpV1?.status === Mk20DealState.DealStateComplete) { + break + } + + await new Promise(resolve => setTimeout(resolve, step)); + duration += step; + if (duration > 90000) { + throw new Error(`Deal ${id} timed out after ${duration} seconds`); + } + } + } + + /** + * Start a PDPv1 deal in two steps and prepare for upload. + * - Step 1: createDataSet + * - Step 2: addPiece with data descriptor (pieceCid, raw format, HTTP PUT source) + * Returns the upload identifier (ULID), computed pieceCid, total size, and the deal payload + * that should be used at finalize time. + */ + async startPDPv1DealForUpload(params: { + blobs: Blob[]; + client: string; + recordKeeper: string; + contractAddress: string; + }): Promise<{ id: string; totalSize: number; dealId: number; pieceCid: string; deal: Mk20Deal }> { + const { blobs, client, recordKeeper } = params; + + // Calculate total size and compute piece CID from blobs + const totalSize = blobs.reduce((sum, blob) => sum + blob.size, 0); + const pieceCid = await PieceCidUtils.computePieceCidV2(blobs); + + // Step 1: create dataset with a fresh identifier + const datasetId = ulid(); + const createDeal: Mk20Deal = { + identifier: datasetId, + client, + products: { + pdpV1: { + createDataSet: true, + addPiece: false, + recordKeeper: recordKeeper, + extraData: [], + deleteDataSet: false, + deletePiece: false, + } as Mk20PDPV1, + retrievalV1: { + announcePayload: true, + announcePiece: true, + indexing: true, + } as Mk20RetrievalV1, + } as Mk20Products, + } as Mk20Deal; + + await this.submitDeal(createDeal); + await this.waitDealComplete(datasetId); + + var datasetIdNumber = 0; // TODO: get dataset id from response + + // Step 2: add piece with data under a new identifier (upload id) + const uploadId = ulid(); + const addPieceDeal: Mk20Deal = { + identifier: uploadId, + client, + data: { + pieceCid: { "/": pieceCid } as object, + format: { raw: {} }, + sourceHttpPut: {}, + } as Mk20DataSource, + products: { + pdpV1: { + addPiece: true, + dataSetId: datasetIdNumber, + recordKeeper: recordKeeper, + extraData: [], + deleteDataSet: false, + deletePiece: false, + } as Mk20PDPV1, + retrievalV1: { + announcePayload: false, // not a CAR file. + announcePiece: true, + indexing: false, // not a CAR file. + } as Mk20RetrievalV1, + } as Mk20Products, + } as Mk20Deal; + + const dealId = await this.submitDeal(addPieceDeal); + await this.waitDealComplete(uploadId); + + return { id: uploadId, totalSize, dealId, pieceCid, deal: addPieceDeal }; + } + + /** + * Upload a set of blobs in chunks and finalize the upload for a given deal id. + * Optionally accepts the deal payload to finalize with. + */ + async uploadBlobs(params: { id: string; blobs: Blob[]; deal?: Mk20Deal; chunkSize?: number }): Promise<{ id: string; uploadedChunks: number; uploadedBytes: number; finalizeCode: number }> { + const { id, blobs } = params; + const chunkSize = params.chunkSize ?? 1024 * 1024; // default 1MB + + const totalSize = blobs.reduce((sum, blob) => sum + blob.size, 0); + + // Initialize chunked upload + const startUpload: Mk20StartUpload = { rawSize: totalSize, chunkSize }; + await this.initializeChunkedUpload(id, startUpload); + + // Upload chunks sequentially + let totalChunks = 0; + let uploadedBytes = 0; + for (const blob of blobs) { + for (let offset = 0; offset < blob.size; offset += chunkSize) { + const chunk = blob.slice(offset, offset + chunkSize); + const chunkArray = new Uint8Array(await chunk.arrayBuffer()); + const chunkNumbers = Array.from(chunkArray); + const chunkNum = String(totalChunks + 1); + await this.uploadChunk(id, chunkNum, chunkNumbers); + totalChunks++; + uploadedBytes += chunkNumbers.length; + } + } + + // Finalize + const finalizeCode = await this.finalizeChunkedUpload(id, params.deal); + return { id, uploadedChunks: totalChunks, uploadedBytes, finalizeCode }; + } + /** + * Simple convenience wrapper for PDPv1 deals with chunked upload + * Takes blobs and required addresses, computes piece_cid, and returns a UUID identifier + */ + /** + * Convenience wrapper for PDPv1 deals with chunked upload. + * @param params - Input parameters + * @param params.blobs - Data to upload as an array of blobs + * @param params.client - Client wallet address + * @param params.provider - Provider wallet address + * @param params.contractAddress - Verification contract address + * @returns Upload metadata including uuid, pieceCid, and stats + */ + async submitPDPv1DealWithUpload(params: { + blobs: Blob[]; + client: string; + recordKeeper: string; + contractAddress: string; + }): Promise<{ + uuid: string; + totalSize: number; + dealId: number; + uploadId: string; + pieceCid: string; + uploadedChunks: number; + uploadedBytes: number; + }> { + try { + const prep = await this.startPDPv1DealForUpload(params); + const ures = await this.uploadBlobs({ id: prep.id, blobs: params.blobs, deal: prep.deal }); + return { + uuid: prep.id, + totalSize: prep.totalSize, + dealId: prep.dealId, + uploadId: prep.id, + pieceCid: prep.pieceCid , + uploadedChunks: ures.uploadedChunks, + uploadedBytes: ures.uploadedBytes, + }; + } catch (error) { + throw new Error(`Failed to submit PDPv1 deal with upload: ${error}`); + } + } + /** + * Upload deal data + */ + /** + * Upload all data in a single request (for small deals). + * @param id - Deal identifier + * @param data - Entire data payload as an array of bytes + */ + async uploadData(id: string, data: Array): Promise { + try { + await this.api.uploadIdPut({ id, body: data }); + } catch (error) { + throw new Error(`Failed to upload data for deal ${id}: ${error}`); + } + } + + /** + * Initialize chunked upload for a deal + * @param id - Deal identifier + * @param startUpload - Upload initialization data + */ + /** + * Initialize chunked upload for a deal. + * @param id - Deal identifier + * @param startUpload - Upload init payload (chunkSize, rawSize) + */ + async initializeChunkedUpload(id: string, startUpload: Mk20StartUpload): Promise { + try { + const apiResp = await this.api.uploadsIdPostRaw({ id, data: startUpload }); + const ct = apiResp.raw.headers.get('content-type') || ''; + if (ct.toLowerCase().includes('application/json') || ct.toLowerCase().includes('+json')) { + return await apiResp.value(); + } + // Treat non-JSON 2xx as success; return HTTP status code + return apiResp.raw.status; + } catch (error: any) { + const resp = error?.response as Response | undefined; + if (resp) { + const msg = await this.formatHttpError(`Failed to initialize chunked upload for deal ${id}`, resp); + throw new Error(msg); + } + throw new Error(`Failed to initialize chunked upload for deal ${id}: ${error?.message || String(error)}`); + } + } + + /** + * Upload a chunk of data for a deal + * @param id - Deal identifier + * @param chunkNum - Chunk number + * @param data - Chunk data + */ + /** + * Upload one chunk for a deal. + * @param id - Deal identifier + * @param chunkNum - Chunk index as string (0-based) + * @param data - Chunk data bytes + */ + async uploadChunk(id: string, chunkNum: string, data: Array): Promise { + try { + const apiResp = await this.api.uploadsIdChunkNumPutRaw({ id, chunkNum, data }, { + headers: { + 'Content-Type': 'application/octet-stream', + 'Authorization': this.config.headers?.Authorization || '' + } + }); + const ct = apiResp.raw.headers.get('content-type') || ''; + if (ct.toLowerCase().includes('application/json') || ct.toLowerCase().includes('+json')) { + return await apiResp.value(); + } + // Treat non-JSON 2xx as success; return HTTP status code + return apiResp.raw.status; + } catch (error: any) { + const resp = error?.response as Response | undefined; + if (resp) { + const msg = await this.formatHttpError(`Failed to upload chunk ${chunkNum} for deal ${id}`, resp); + throw new Error(msg); + } + throw new Error(`Failed to upload chunk ${chunkNum} for deal ${id}: ${error?.message || String(error)}`); + } + } + + /** + * Finalize chunked upload for a deal + * @param id - Deal identifier + * @param deal - Optional deal data for finalization + */ + /** + * Finalize a chunked upload. + * @param id - Deal identifier + * @param deal - Optional deal payload to finalize with + */ + async finalizeChunkedUpload(id: string, deal?: any): Promise { + try { + const apiResp = await this.api.uploadsFinalizeIdPostRaw({ id, body: deal }); + const ct = apiResp.raw.headers.get('content-type') || ''; + if (ct.toLowerCase().includes('application/json') || ct.toLowerCase().includes('+json')) { + return await apiResp.value(); + } + // Treat non-JSON 2xx as success; return HTTP status code + return apiResp.raw.status; + } catch (error: any) { + const resp = error?.response as Response | undefined; + if (resp) { + const msg = await this.formatHttpError(`Failed to finalize chunked upload for deal ${id}`, resp); + throw new Error(msg); + } + throw new Error(`Failed to finalize chunked upload for deal ${id}: ${error?.message || String(error)}`); + } + } + + /** + * Finalize a serial (single PUT) upload. + * @param id - Deal identifier (ULID string) + * @param deal - Optional deal payload to finalize with + */ + async finalizeSerialUpload(id: string, deal?: Mk20Deal): Promise { + try { + const apiResp = await this.api.uploadIdPostRaw({ id, body: deal }); + const ct = apiResp.raw.headers.get('content-type') || ''; + if (ct.toLowerCase().includes('application/json') || ct.toLowerCase().includes('+json')) { + return await apiResp.value(); + } + // Treat non-JSON 2xx as success; return HTTP status code + return apiResp.raw.status; + } catch (error: any) { + const resp = error?.response as Response | undefined; + if (resp) { + const msg = await this.formatHttpError(`Failed to finalize serial upload for deal ${id}`, resp); + throw new Error(msg); + } + throw new Error(`Failed to finalize serial upload for deal ${id}: ${error?.message || String(error)}`); + } + } + + /** + * Get upload status for a deal + * @param id - Deal identifier + */ + /** + * Get upload status for a deal. + * @param id - Deal identifier + */ + async getUploadStatus(id: string): Promise { + try { + return await this.api.uploadsIdGet({ id }); + } catch (error) { + throw new Error(`Failed to get upload status for deal ${id}: ${error}`); + } + } + + /** + * Update an existing deal (e.g., request deletion via PDPv1 flags). + * @param id - Deal identifier (ULID string) + * @param deal - Deal payload with updated products + */ + async updateDeal(id: string, deal: Mk20Deal): Promise { + try { + const result = await this.api.updateIdGet({ id, body: deal }); + return result; + } catch (error) { + throw new Error(`Failed to update deal ${id}: ${error}`); + } + } + + /** + * Get info (placeholder method for compatibility) + */ + async getInfo(): Promise { + throw new Error('Failed to get info: Error: Info endpoint not available in generated API'); + } +} diff --git a/market/mk20/tsclient/src/index.ts b/market/mk20/tsclient/src/index.ts new file mode 100644 index 000000000..9cb28212d --- /dev/null +++ b/market/mk20/tsclient/src/index.ts @@ -0,0 +1,50 @@ +// Export the generated client and types +export * from '../generated'; + +// Import everything we need for the CurioMarket object +import { DefaultApi as MarketClient } from '../generated'; +import { MarketClient as Client, PieceCidUtils } from './client'; +import { StreamingPDP } from './streaming'; +import { AuthUtils, Ed25519KeypairSigner, Secp256k1AddressSigner } from './auth'; +import { calculate as calculatePieceCID, asPieceCID, asLegacyPieceCID, createPieceCIDStream } from './piece'; + +// Top-level export that encompasses all exports with nice names +export const CurioMarket = { + // Classes and utilities + MarketClient, + Client, + PieceCidUtils, + StreamingPDP, + AuthUtils, + Ed25519KeypairSigner, + Secp256k1AddressSigner, + calculatePieceCID, + asPieceCID, + asLegacyPieceCID, + createPieceCIDStream, +} as const; + +// Export types with nice names +export type { + Mk20Deal as Deal, + Mk20DataSource as DataSource, + Mk20Products as Products, + Mk20DDOV1 as DDOV1, + Mk20PDPV1 as PDPV1, + Mk20RetrievalV1 as RetrievalV1, + Mk20DealProductStatusResponse as DealProductStatusResponse, + Mk20SupportedContracts as SupportedContracts, + Mk20SupportedProducts as SupportedProducts, + Mk20SupportedDataSources as SupportedDataSources, + Mk20DealCode as DealCode, + Mk20StartUpload as StartUpload, + Mk20UploadCode as UploadCode, + Mk20UploadStartCode as UploadStartCode, + Mk20UploadStatus as UploadStatus, + Mk20UploadStatusCode as UploadStatusCode, + Configuration +} from '../generated'; + +export type { MarketClientConfig } from './client'; +export type { AuthSigner } from './auth'; +export type { PieceCID, LegacyPieceCID } from './piece'; \ No newline at end of file diff --git a/market/mk20/tsclient/src/piece.ts b/market/mk20/tsclient/src/piece.ts new file mode 100644 index 000000000..a0ecb238b --- /dev/null +++ b/market/mk20/tsclient/src/piece.ts @@ -0,0 +1,206 @@ +/** + * PieceCID (Piece Commitment CID) utilities + * + * Helper functions for working with Filecoin Piece CIDs + */ + +import type { LegacyPieceLink as LegacyPieceCIDType, PieceLink as PieceCIDType } from '@web3-storage/data-segment' +import * as Hasher from '@web3-storage/data-segment/multihash' +import { CID } from 'multiformats/cid' +import * as Raw from 'multiformats/codecs/raw' +import * as Digest from 'multiformats/hashes/digest' +import * as Link from 'multiformats/link' + +const FIL_COMMITMENT_UNSEALED = 0xf101 +const SHA2_256_TRUNC254_PADDED = 0x1012 + +/** + * PieceCID - A constrained CID type for Piece Commitments. + * This is implemented as a Link type which is made concrete by a CID. A + * PieceCID uses the raw codec (0x55) and the fr32-sha256-trunc254-padbintree + * multihash function (0x1011) which encodes the base content length (as + * padding) of the original piece, and the height of the merkle tree used to + * hash it. + * + * See https://github.com/filecoin-project/FIPs/blob/master/FRCs/frc-0069.md + * for more information. + */ +export type PieceCID = PieceCIDType + +/** + * LegacyPieceCID - A constrained CID type for Legacy Piece Commitments. + * This is implemented as a Link type which is made concrete by a CID. A + * LegacyPieceCID uses the fil-commitment-unsealed codec (0xf101) and the + * sha2-256-trunc254-padded (0x1012) multihash function. + * This 32 bytes of the hash digest in a LegacyPieceCID is the same as the + * equivalent PieceCID, but a LegacyPieceCID does not encode the length or + * tree height of the original raw piece. A PieceCID can be converted to a + * LegacyPieceCID, but not vice versa. + * LegacyPieceCID is commonly known as "CommP" or simply "Piece Commitment" + * in Filecoin. + */ +export type LegacyPieceCID = LegacyPieceCIDType + +/** + * Parse a PieceCID string into a CID and validate it + * @param pieceCidString - The PieceCID as a string (base32 or other multibase encoding) + * @returns The parsed and validated PieceCID CID or null if invalid + */ +function parsePieceCID(pieceCidString: string): PieceCID | null { + try { + const cid = CID.parse(pieceCidString) + if (isValidPieceCID(cid)) { + return cid as PieceCID + } + } catch { + // ignore error + } + return null +} + +/** + * Parse a LegacyPieceCID string into a CID and validate it + * @param pieceCidString - The LegacyPieceCID as a string (base32 or other multibase encoding) + * @returns The parsed and validated LegacyPieceCID CID or null if invalid + */ +function parseLegacyPieceCID(pieceCidString: string): LegacyPieceCID | null { + try { + const cid = CID.parse(pieceCidString) + if (isValidLegacyPieceCID(cid)) { + return cid as LegacyPieceCID + } + } catch { + // ignore error + } + return null +} + +/** + * Check if a CID is a valid PieceCID + * @param cid - The CID to check + * @returns True if it's a valid PieceCID + */ +function isValidPieceCID(cid: PieceCID | CID): cid is PieceCID { + return cid.code === Raw.code && cid.multihash.code === Hasher.code +} + +/** + * Check if a CID is a valid LegacyPieceCID + * @param cid - The CID to check + * @returns True if it's a valid LegacyPieceCID + */ +function isValidLegacyPieceCID(cid: LegacyPieceCID | CID): cid is LegacyPieceCID { + return cid.code === FIL_COMMITMENT_UNSEALED && cid.multihash.code === SHA2_256_TRUNC254_PADDED +} + +/** + * Convert a PieceCID input (string or CID) to a validated CID + * This is the main function to use when accepting PieceCID inputs + * @param pieceCidInput - PieceCID as either a CID object or string + * @returns The validated PieceCID CID or null if not a valid PieceCID + */ +export function asPieceCID(pieceCidInput: PieceCID | CID | string): PieceCID | null { + if (typeof pieceCidInput === 'string') { + return parsePieceCID(pieceCidInput) + } + + if (typeof pieceCidInput === 'object' && CID.asCID(pieceCidInput as CID) !== null) { + // It's already a CID, validate it + if (isValidPieceCID(pieceCidInput as CID)) { + return pieceCidInput as PieceCID + } + } + + // Nope + return null +} + +/** + * Convert a LegacyPieceCID input (string or CID) to a validated CID + * This function can be used to parse a LegacyPieceCID (CommPv1) or to downgrade a PieceCID + * (CommPv2) to a LegacyPieceCID. + * @param pieceCidInput - LegacyPieceCID as either a CID object or string + * @returns The validated LegacyPieceCID CID or null if not a valid LegacyPieceCID + */ +export function asLegacyPieceCID(pieceCidInput: PieceCID | LegacyPieceCID | CID | string): LegacyPieceCID | null { + const pieceCid = asPieceCID(pieceCidInput as CID | string) + if (pieceCid != null) { + // downgrade to LegacyPieceCID + const digest = Digest.create(SHA2_256_TRUNC254_PADDED, pieceCid.multihash.digest.subarray(-32)) + return Link.create(FIL_COMMITMENT_UNSEALED, digest) as LegacyPieceCID + } + + if (typeof pieceCidInput === 'string') { + return parseLegacyPieceCID(pieceCidInput) + } + + if (typeof pieceCidInput === 'object' && CID.asCID(pieceCidInput as CID) !== null) { + // It's already a CID, validate it + if (isValidLegacyPieceCID(pieceCidInput as CID)) { + return pieceCidInput as LegacyPieceCID + } + } + + // Nope + return null +} + +/** + * Calculate the PieceCID (Piece Commitment) for a given data blob + * @param data - The binary data to calculate the PieceCID for + * @returns The calculated PieceCID CID + */ +export function calculate(data: Uint8Array): PieceCID { + // TODO: consider https://github.com/storacha/fr32-sha2-256-trunc254-padded-binary-tree-multihash + // for more efficient PieceCID calculation in WASM + const hasher = Hasher.create() + // We'll get slightly better performance by writing in chunks to let the + // hasher do its work incrementally + const chunkSize = 2048 + for (let i = 0; i < data.length; i += chunkSize) { + hasher.write(data.subarray(i, i + chunkSize)) + } + const digest = hasher.digest() + return Link.create(Raw.code, digest) +} + +/** + * Create a TransformStream that calculates PieceCID while streaming data through it + * This allows calculating PieceCID without buffering the entire data in memory + * + * @returns An object with the TransformStream and a getPieceCID function to retrieve the result + */ +export function createPieceCIDStream(): { + stream: TransformStream + getPieceCID: () => PieceCID | null +} { + const hasher = Hasher.create() + let finished = false + let pieceCid: PieceCID | null = null + + const stream = new TransformStream({ + transform(chunk: Uint8Array, controller: TransformStreamDefaultController) { + // Write chunk to hasher + hasher.write(chunk) + // Pass chunk through unchanged + controller.enqueue(chunk) + }, + + flush() { + // Calculate final PieceCID when stream ends + const digest = hasher.digest() + pieceCid = Link.create(Raw.code, digest) + finished = true + }, + }) + + return { + stream, + getPieceCID: () => { + if (!finished) { + return null + } + return pieceCid + }, + } +} diff --git a/market/mk20/tsclient/src/streaming.ts b/market/mk20/tsclient/src/streaming.ts new file mode 100644 index 000000000..005cf6a3e --- /dev/null +++ b/market/mk20/tsclient/src/streaming.ts @@ -0,0 +1,206 @@ +import { MarketClient as Client } from './client'; +import type { Mk20Deal as Deal, Mk20Products as Products, Mk20PDPV1 as PDPV1, Mk20RetrievalV1 as RetrievalV1, Mk20DataSource, Mk20PieceDataFormat } from '../generated'; +import { ulid } from 'ulid'; +import { createPieceCIDStream, type PieceCID } from './piece'; + +// Removed old custom implementation - now using piece.ts streaming functions + +/** + * StreamingPDP provides a streaming workflow to create a deal without a data section, + * push data via chunked upload, compute the piece CID while streaming, and finalize. + */ +export class StreamingPDP { + private client: Client; + private id: string; + private identifierBytes: number[]; + private totalSize = 0; + private deal: Deal | undefined; + private clientAddr: string; + private providerAddr: string; + private contractAddress: string; + private chunkSize: number; + private buffer: number[] = []; + private nextChunkNum = 0; + private uploadedBytes = 0; + private totalChunks = 0; + private pieceCIDStream: { stream: TransformStream; getPieceCID: () => PieceCID | null }; + private writer: WritableStreamDefaultWriter; + + /** + * @param client - Market client instance + * @param opts - Streaming options + * @param opts.client - Client wallet address + * @param opts.provider - Provider wallet address + * @param opts.contractAddress - Verification contract address + * @param opts.chunkSize - Optional chunk size in bytes (default 1MB) + */ + constructor(client: Client, opts: { client: string; provider: string; contractAddress: string; chunkSize?: number }) { + this.client = client; + this.clientAddr = opts.client; + this.providerAddr = opts.provider; + this.contractAddress = opts.contractAddress; + this.chunkSize = opts.chunkSize ?? 1024 * 1024; + this.id = ulid(); + this.identifierBytes = Array.from(this.id).map(c => c.charCodeAt(0)).slice(0, 16); + while (this.identifierBytes.length < 16) this.identifierBytes.push(0); + + // Initialize streaming piece CID computation + this.pieceCIDStream = createPieceCIDStream(); + this.writer = this.pieceCIDStream.stream.writable.getWriter(); + } + + /** + * Begin the streaming deal by submitting a deal without data and initializing upload. + */ + async begin(): Promise { + var products: Products = { + pdpV1: { + createDataSet: true, + recordKeeper: this.providerAddr, + extraData: [], + pieceIds: undefined, + deleteDataSet: false, + deletePiece: false, + } as PDPV1, + retrievalV1: { + announcePayload: true, + announcePiece: true, + indexing: true, + } as RetrievalV1, + } as Products; + + const deal: Deal = { + identifier: this.id, + client: this.clientAddr, + products, + } as Deal; + + this.deal = deal; + await this.client.submitDeal(deal); + + await this.client.waitDealComplete(this.id); + + var products: Products = { + pdpV1: { + addPiece: true, + recordKeeper: this.providerAddr, + extraData: [], + pieceIds: undefined, + deleteDataSet: false, + deletePiece: false, + } as PDPV1, + retrievalV1: { + announcePayload: true, + announcePiece: true, + indexing: true, + } as RetrievalV1, + } as Products; + + await this.client.submitDeal({ + identifier: this.id, + client: this.clientAddr, + products, + } as Deal); + + + await this.client.initializeChunkedUpload(this.id, { chunkSize: this.chunkSize }); + } + + /** + * Write a chunk of data into the stream. This uploads full chunks immediately + * and buffers any remainder until the next write or commit. + * @param chunk - Data bytes to write + */ + async write(chunk: Uint8Array | Buffer): Promise { + const u8 = chunk instanceof Uint8Array ? chunk : new Uint8Array(chunk); + this.totalSize += u8.length; + + // Write to streaming piece CID computation + await this.writer.write(u8); + + let idx = 0; + if (this.buffer.length > 0) { + const needed = this.chunkSize - this.buffer.length; + const take = Math.min(needed, u8.length); + for (let i = 0; i < take; i++) this.buffer.push(u8[idx + i]); + idx += take; + if (this.buffer.length === this.chunkSize) { + const toSend = this.buffer.slice(0, this.chunkSize); + await this.uploadChunkNow(toSend); + this.buffer = []; + } + } + + while (u8.length - idx >= this.chunkSize) { + const sub = u8.subarray(idx, idx + this.chunkSize); + const toSend = Array.from(sub); + await this.uploadChunkNow(toSend); + idx += this.chunkSize; + } + + for (let i = idx; i < u8.length; i++) this.buffer.push(u8[i]); + } + + /** + * Finalize the streaming deal: flush remaining data, compute piece CID, and finalize. + * @returns Object containing id (ULID), pieceCid, and totalSize + */ + async commit(): Promise<{ id: string; pieceCid: string; totalSize: number }> { + if (!this.deal) throw new Error('StreamingPDP not started. Call begin() first.'); + + if (this.buffer.length > 0) { + const toSend = this.buffer.slice(); + await this.uploadChunkNow(toSend); + this.buffer = []; + } + + // Close the writer and get the piece CID + await this.writer.close(); + const pieceCID = this.pieceCIDStream.getPieceCID(); + if (!pieceCID) { + throw new Error('Failed to compute piece CID from stream'); + } + const pieceCid = pieceCID.toString(); + + const dataSource: Mk20DataSource = { + pieceCid: { "/": pieceCid } as { [key: string]: string; }, + format: { raw: {} } as Mk20PieceDataFormat, + sourceHttpPut: { raw_size: this.totalSize } as unknown as object, + }; + + const finalizedDeal: Deal = { + ...this.deal, + data: dataSource, + } as Deal; + + await this.client.finalizeChunkedUpload(this.id, finalizedDeal); + + return { id: this.id, pieceCid, totalSize: this.totalSize }; + } + + /** + * Upload a single chunk immediately. + * @param data - Chunk bytes + */ + private async uploadChunkNow(data: number[]): Promise { + const chunkNum = String(this.nextChunkNum); + await this.client.uploadChunk(this.id, chunkNum, data); + this.nextChunkNum++; + this.uploadedBytes += data.length; + this.totalChunks++; + } + + /** + * Clean up resources. Call this when done with the StreamingPDP instance. + */ + async cleanup(): Promise { + try { + await this.writer.close(); + } catch (error) { + // Writer might already be closed + } + } + +} + + diff --git a/market/mk20/tsclient/test-curio-market.ts b/market/mk20/tsclient/test-curio-market.ts new file mode 100644 index 000000000..75b8d50b1 --- /dev/null +++ b/market/mk20/tsclient/test-curio-market.ts @@ -0,0 +1,22 @@ +// Test script to verify CurioMarket object structure +import { CurioMarket } from './src'; + +console.log('CurioMarket object structure:'); +console.log('Available properties:', Object.keys(CurioMarket)); + +console.log('\nClasses available:'); +console.log('- MarketClient:', typeof CurioMarket.MarketClient); +console.log('- Client:', typeof CurioMarket.Client); +console.log('- PieceCidUtils:', typeof CurioMarket.PieceCidUtils); +console.log('- StreamingPDP:', typeof CurioMarket.StreamingPDP); +console.log('- AuthUtils:', typeof CurioMarket.AuthUtils); + +console.log('\nFunctions available:'); +console.log('- calculatePieceCID:', typeof CurioMarket.calculatePieceCID); +console.log('- asPieceCID:', typeof CurioMarket.asPieceCID); +console.log('- asLegacyPieceCID:', typeof CurioMarket.asLegacyPieceCID); +console.log('- createPieceCIDStream:', typeof CurioMarket.createPieceCIDStream); + +console.log('\n✅ CurioMarket object is properly structured!'); + + diff --git a/market/mk20/tsclient/tests/__mocks__/multiformats/cid.ts b/market/mk20/tsclient/tests/__mocks__/multiformats/cid.ts new file mode 100644 index 000000000..5b1639e3d --- /dev/null +++ b/market/mk20/tsclient/tests/__mocks__/multiformats/cid.ts @@ -0,0 +1,17 @@ +export class CID { + public code: number; + public multihash: any; + + constructor(code: number, multihash: any) { + this.code = code; + this.multihash = multihash; + } + + static create(version: number, code: number, multihash: any): CID { + return new CID(code, multihash); + } + + toString(): string { + return `bafybeihq6mbsd757cdm4sn6z5r7w6tdvkrb3q9iu3pjr7q3ip24c65qh2i`; + } +} diff --git a/market/mk20/tsclient/tests/__mocks__/multiformats/hashes/sha2.ts b/market/mk20/tsclient/tests/__mocks__/multiformats/hashes/sha2.ts new file mode 100644 index 000000000..ec39aa911 --- /dev/null +++ b/market/mk20/tsclient/tests/__mocks__/multiformats/hashes/sha2.ts @@ -0,0 +1,8 @@ +export const sha256 = { + async digest(data: Uint8Array): Promise { + return { + code: 0x12, + digest: new Uint8Array(32).fill(1) + }; + } +}; diff --git a/market/mk20/tsclient/tests/client.test.ts b/market/mk20/tsclient/tests/client.test.ts new file mode 100644 index 000000000..db7085e30 --- /dev/null +++ b/market/mk20/tsclient/tests/client.test.ts @@ -0,0 +1,121 @@ +import { MarketClient, MarketClientConfig } from '../src/client'; +import { mockResponse, mockError } from './setup'; + +// Mock the generated API +jest.mock('../generated', () => ({ + DefaultApi: jest.fn().mockImplementation(() => ({ + contractsGet: jest.fn(), + productsGet: jest.fn(), + sourcesGet: jest.fn(), + statusIdGet: jest.fn(), + storePost: jest.fn(), + uploadIdPut: jest.fn(), + })), +})); + +describe('MarketClient', () => { + let client: MarketClient; + let mockApi: any; + + beforeEach(() => { + const config: MarketClientConfig = { + serverUrl: 'http://localhost:8080', + } as MarketClientConfig; + + client = new MarketClient(config); + + // Get the mocked API instance and set up the mocks + const { DefaultApi } = require('../generated'); + mockApi = new DefaultApi(); + + // Set up the mock methods on the client's API instance + (client as any).api = mockApi; + }); + + describe('getContracts', () => { + it('should return contracts successfully', async () => { + const mockContracts = ['0x123', '0x456']; + mockApi.contractsGet.mockResolvedValue({ + contracts: mockContracts, + }); + + const result = await client.getContracts(); + expect(result).toEqual(mockContracts); + }); + + it('should handle errors gracefully', async () => { + mockApi.contractsGet.mockRejectedValue(new Error('API Error')); + + await expect(client.getContracts()).rejects.toThrow('Failed to get contracts: Error: API Error'); + }); + }); + + describe('getProducts', () => { + it('should return products successfully', async () => { + const mockProducts = { ddo_v1: true, pdp_v1: true }; + mockApi.productsGet.mockResolvedValue(mockProducts); + + const result = await client.getProducts(); + expect(result).toEqual(mockProducts); + }); + }); + + describe('getSources', () => { + it('should return sources successfully', async () => { + const mockSources = { http: true, aggregate: true }; + mockApi.sourcesGet.mockResolvedValue(mockSources); + + const result = await client.getSources(); + expect(result).toEqual(mockSources); + }); + }); + + describe('getStatus', () => { + it('should return deal status successfully', async () => { + const mockStatus = { identifier: 'test-id', status: 'active' }; + mockApi.statusIdGet.mockResolvedValue(mockStatus); + + const result = await client.getStatus('test-id'); + expect(result).toEqual(mockStatus); + }); + + it('should handle errors with deal ID context', async () => { + mockApi.statusIdGet.mockRejectedValue(new Error('Not Found')); + + await expect(client.getStatus('test-id')).rejects.toThrow('Failed to get deal status for test-id: Error: Not Found'); + }); + }); + + describe('submitDeal', () => { + it('should submit deal successfully', async () => { + const mockDeal = { identifier: '01H0EXAMPLEULIDIDENTIFIER00000000' }; + const mockResult = 200; // DealCode.Ok + mockApi.storePost.mockResolvedValue(mockResult); + + const result = await client.submitDeal(mockDeal); + expect(result).toEqual(mockResult); + }); + }); + + describe('uploadData', () => { + it('should upload data successfully', async () => { + const testData = [1, 2, 3, 4, 5, 6, 7, 8]; + mockApi.uploadIdPut.mockResolvedValue(undefined); + + await expect(client.uploadData('test-id', testData)).resolves.not.toThrow(); + }); + + it('should handle upload errors', async () => { + const testData = [1, 2, 3, 4, 5, 6, 7, 8]; + mockApi.uploadIdPut.mockRejectedValue(new Error('Upload failed')); + + await expect(client.uploadData('test-id', testData)).rejects.toThrow('Failed to upload data for deal test-id: Error: Upload failed'); + }); + }); + + describe('getInfo', () => { + it('should handle info endpoint not available', async () => { + await expect(client.getInfo()).rejects.toThrow('Failed to get info: Error: Info endpoint not available in generated API'); + }); + }); +}); diff --git a/market/mk20/tsclient/tests/examples.basic-usage.test.ts b/market/mk20/tsclient/tests/examples.basic-usage.test.ts new file mode 100644 index 000000000..7f0093b1a --- /dev/null +++ b/market/mk20/tsclient/tests/examples.basic-usage.test.ts @@ -0,0 +1,45 @@ +// Tests covering examples/basic-usage.ts + +jest.mock('../generated', () => ({ + DefaultApi: jest.fn().mockImplementation(() => ({ + contractsGet: jest.fn().mockResolvedValue({ contracts: ['0xabc'] }), + productsGet: jest.fn().mockResolvedValue({ ddo_v1: true, pdp_v1: true }), + sourcesGet: jest.fn().mockResolvedValue({ http: true, aggregate: true }), + statusIdGet: jest.fn().mockResolvedValue({ identifier: 'id', status: 'active' }), + storePost: jest.fn().mockResolvedValue(200), + uploadIdPut: jest.fn().mockResolvedValue(undefined), + uploadsIdPost: jest.fn().mockResolvedValue(200), + uploadsIdChunkNumPut: jest.fn().mockResolvedValue(200), + uploadsFinalizeIdPost: jest.fn().mockResolvedValue(200), + })), +})); + +import { exampleUsage, uploadDataExample, pieceIdCalculationExample } from '../examples/basic-usage'; +import { PieceCidUtils } from '../src'; + +describe('examples/basic-usage.ts', () => { + beforeAll(() => { + jest.spyOn(PieceCidUtils, 'computePieceCidV2').mockResolvedValue('btestcid'); + }); + + afterAll(() => { + jest.restoreAllMocks(); + }); + + it('runs exampleUsage without errors', async () => { + await expect(exampleUsage()).resolves.not.toThrow(); + }); + + it('runs uploadDataExample without errors', async () => { + await expect(uploadDataExample('deal-id', [1, 2, 3])).resolves.not.toThrow(); + }); + + it('runs pieceIdCalculationExample and returns results', async () => { + const res = await pieceIdCalculationExample(); + expect(res).toBeDefined(); + expect(res.dealId).toBe(200); + expect(res.pieceCid).toBe('btestcid'); + }); +}); + + diff --git a/market/mk20/tsclient/tests/examples.product-types.test.ts b/market/mk20/tsclient/tests/examples.product-types.test.ts new file mode 100644 index 000000000..53277348c --- /dev/null +++ b/market/mk20/tsclient/tests/examples.product-types.test.ts @@ -0,0 +1,46 @@ +// Tests covering examples/product-types.ts + +jest.mock('../generated', () => ({ + DefaultApi: jest.fn().mockImplementation(() => ({ + storePost: jest.fn().mockResolvedValue(200), + contractsGet: jest.fn().mockResolvedValue({ contracts: ['0xabc'] }), + productsGet: jest.fn().mockResolvedValue({ ddo_v1: true, pdp_v1: true }), + sourcesGet: jest.fn().mockResolvedValue({ http: true, aggregate: true }), + })), +})); + +import { pdpv1ProductExample, ddov1ProductExample, retrievalV1ProductExample, convenienceWrapperExample } from '../examples/product-types'; +import { PieceCidUtils } from '../src'; + +describe('examples/product-types.ts', () => { + beforeAll(() => { + jest.spyOn(PieceCidUtils, 'computePieceCidV2').mockResolvedValue('btestcid'); + }); + + afterAll(() => { + jest.restoreAllMocks(); + }); + + it('pdpv1ProductExample returns a deal structure', async () => { + const deal = await pdpv1ProductExample(); + expect(deal.products).toBeDefined(); + expect(deal.products?.pdpV1).toBeDefined(); + }); + + it('ddov1ProductExample returns a deal structure', async () => { + const deal = await ddov1ProductExample(); + expect(deal.products).toBeDefined(); + expect(deal.products?.ddoV1).toBeDefined(); + }); + + it('retrievalV1ProductExample returns a config', async () => { + const cfg = await retrievalV1ProductExample(); + expect(cfg.indexing).toBe(true); + }); + + it('convenienceWrapperExample runs without errors', async () => { + await expect(convenienceWrapperExample()).resolves.not.toThrow(); + }); +}); + + diff --git a/market/mk20/tsclient/tests/examples.streaming-pdp.test.ts b/market/mk20/tsclient/tests/examples.streaming-pdp.test.ts new file mode 100644 index 000000000..1b60b3965 --- /dev/null +++ b/market/mk20/tsclient/tests/examples.streaming-pdp.test.ts @@ -0,0 +1,43 @@ +// Tests covering examples/streaming-pdp.ts and StreamingPDP helper + +jest.mock('../generated', () => ({ + DefaultApi: jest.fn().mockImplementation(() => ({ + storePost: jest.fn().mockResolvedValue(200), + uploadsIdPost: jest.fn().mockResolvedValue(200), + uploadsIdChunkNumPut: jest.fn().mockResolvedValue(200), + uploadsFinalizeIdPost: jest.fn().mockResolvedValue(200), + })), +})); + +import { MarketClientConfig, Client, StreamingPDP, PieceCidUtils } from '../src'; + +describe('streaming-pdp example and helper', () => { + beforeAll(() => { + jest.spyOn(PieceCidUtils, 'computePieceCidV2').mockResolvedValue('btestcid'); + }); + + afterAll(() => { + jest.restoreAllMocks(); + }); + + it('streams data via StreamingPDP and finalizes', async () => { + const config: MarketClientConfig = { serverUrl: 'http://localhost:8080' } as MarketClientConfig; + const client = new Client(config); + const spdp = client.streamingPDP({ + client: 'f1client...', + provider: 'f1provider...', + contractAddress: '0x...', + }); + + await spdp.begin(); + spdp.write(new TextEncoder().encode('hello ')); + spdp.write(new TextEncoder().encode('world')); + const res = await spdp.commit(); + + expect(res.id).toBeDefined(); + expect(res.pieceCid).toMatch(/^b/); + expect(res.totalSize).toBeGreaterThan(0); + }); +}); + + diff --git a/market/mk20/tsclient/tests/setup.ts b/market/mk20/tsclient/tests/setup.ts new file mode 100644 index 000000000..b35a083e6 --- /dev/null +++ b/market/mk20/tsclient/tests/setup.ts @@ -0,0 +1,24 @@ +// Global test setup +import 'isomorphic-fetch'; + +// Mock fetch for testing +global.fetch = jest.fn(); + +// Reset mocks before each test +beforeEach(() => { + jest.clearAllMocks(); +}); + +// Test utilities +export const mockResponse = (data: any, status = 200) => { + return Promise.resolve({ + ok: status >= 200 && status < 300, + status, + json: () => Promise.resolve(data), + text: () => Promise.resolve(JSON.stringify(data)), + } as Response); +}; + +export const mockError = (status: number, message: string) => { + return Promise.reject(new Error(`${status}: ${message}`)); +}; diff --git a/market/mk20/tsclient/tsconfig.json b/market/mk20/tsclient/tsconfig.json new file mode 100644 index 000000000..b710e6cce --- /dev/null +++ b/market/mk20/tsclient/tsconfig.json @@ -0,0 +1,28 @@ +{ + "compilerOptions": { + "target": "ES2020", + "module": "NodeNext", + "lib": ["ES2020", "DOM"], + "types": ["node"], + "declaration": true, + "outDir": "./dist", + "rootDir": ".", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "moduleResolution": "nodenext", + "allowSyntheticDefaultImports": true, + "experimentalDecorators": true, + "emitDecoratorMetadata": true + }, + "include": [ + "src/**/*", + "generated/**/*" + ], + "exclude": [ + "node_modules", + "dist" + ] +} diff --git a/market/mk20/types.go b/market/mk20/types.go new file mode 100644 index 000000000..9014f123f --- /dev/null +++ b/market/mk20/types.go @@ -0,0 +1,274 @@ +package mk20 + +import ( + "net/http" + + "github.com/ipfs/go-cid" + "github.com/oklog/ulid" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" +) + +// Deal represents a structure defining the details and components of a specific deal in the system. +type Deal struct { + + // Identifier represents a unique identifier for the deal in ULID format. + Identifier ulid.ULID `json:"identifier" swaggertype:"string" format:"ulid" example:"01ARZ3NDEKTSV4RRFFQ69G5FAV"` + + // Client wallet string for the deal + Client string `json:"client"` + + // Data represents the source of piece data and associated metadata. + Data *DataSource `json:"data,omitempty"` + + // Products represents a collection of product-specific information associated with a deal + Products Products `json:"products"` +} + +type Products struct { + // DDOV1 represents a product v1 configuration for Direct Data Onboarding (DDO) + DDOV1 *DDOV1 `json:"ddo_v1,omitempty"` + + // RetrievalV1 represents configuration for retrieval settings in the system, including indexing and announcement flags. + RetrievalV1 *RetrievalV1 `json:"retrieval_v1,omitempty"` + + // PDPV1 represents product-specific configuration for PDP version 1 deals. + PDPV1 *PDPV1 `json:"pdp_v1,omitempty"` +} + +// DataSource represents the source of piece data, including metadata and optional methods to fetch or describe the data origin. +type DataSource struct { + + // PieceCID represents the unique identifier (pieceCID V2) for a piece of data, stored as a CID object. + PieceCID cid.Cid `json:"piece_cid" swaggertype:"object,string" example:"/:bafkzcibfxx3meais3xzh6qn56y6hiasmrufhegoweu3o5ccofs74nfdfr4yn76pqz4pq"` + + // Format defines the format of the piece data, which can include CAR, Aggregate, or Raw formats. + Format PieceDataFormat `json:"format"` + + // SourceHTTP represents the HTTP-based source of piece data within a deal, including raw size and URLs for retrieval. + SourceHTTP *DataSourceHTTP `json:"source_http,omitempty"` + + // SourceAggregate represents an aggregated source, comprising multiple data sources as pieces. + SourceAggregate *DataSourceAggregate `json:"source_aggregate,omitempty"` + + // SourceOffline defines the data source for offline pieces, including raw size information. + SourceOffline *DataSourceOffline `json:"source_offline,omitempty"` + + // SourceHttpPut allow clients to push piece data after deal is accepted + SourceHttpPut *DataSourceHttpPut `json:"source_http_put,omitempty"` +} + +// PieceDataFormat represents various formats in which piece data can be defined, including CAR files, aggregate formats, or raw byte data. +type PieceDataFormat struct { + + // Car represents the optional CAR file format. + Car *FormatCar `json:"car,omitempty"` + + // Aggregate holds a reference to the aggregated format of piece data. + Aggregate *FormatAggregate `json:"aggregate,omitempty"` + + // Raw represents the raw format of the piece data, encapsulated as bytes. + Raw *FormatBytes `json:"raw,omitempty"` +} + +// FormatCar represents the CAR (Content Addressable archive) format for piece data serialization. +type FormatCar struct{} + +// FormatAggregate represents the aggregated format for piece data, identified by its type. +type FormatAggregate struct { + + // Type specifies the type of aggregation for data pieces, represented by an AggregateType value. + Type AggregateType `json:"type"` + + // Sub holds a slice of DataSource, representing details of sub pieces aggregated under this format. + // The order must be same as segment index to avoid incorrect indexing of sub pieces in an aggregate + Sub []DataSource `json:"sub"` +} + +// FormatBytes defines the raw byte representation of data as a format. +type FormatBytes struct{} + +// DataSourceOffline represents the data source for offline pieces. +type DataSourceOffline struct{} + +// DataSourceAggregate represents an aggregated data source containing multiple individual DataSource pieces. +type DataSourceAggregate struct { + Pieces []DataSource `json:"pieces"` +} + +// DataSourceHTTP represents an HTTP-based data source for retrieving piece data, including associated URLs. +type DataSourceHTTP struct { + // URLs lists the HTTP endpoints where the piece data can be fetched. + URLs []HttpUrl `json:"urls"` +} + +// HttpUrl represents an HTTP endpoint configuration for fetching piece data. +type HttpUrl struct { + + // URL specifies the HTTP endpoint where the piece data can be fetched. + URL string `json:"url"` + + // HTTPHeaders represents the HTTP headers associated with the URL. + Headers http.Header `json:"headers"` + + // Priority indicates the order preference for using the URL in requests, with lower values having higher priority. + Priority int `json:"priority"` + + // Fallback indicates whether this URL serves as a fallback option when other URLs fail. + Fallback bool `json:"fallback"` +} + +// DataSourceHttpPut represents a data source allowing clients to push piece data after a deal is accepted. +type DataSourceHttpPut struct{} + +// AggregateType represents an unsigned integer used to define the type of aggregation for data pieces in the system. +type AggregateType int + +const ( + + // AggregateTypeNone represents the default aggregation type, indicating no specific aggregation is applied. + AggregateTypeNone AggregateType = iota + + // AggregateTypeV1 represents the first version of the aggregate type in the system. This is current PODSI aggregation + // based on https://github.com/filecoin-project/FIPs/blob/master/FRCs/frc-0058.md + AggregateTypeV1 +) + +// DealCode represents an error code as an integer value +type DealCode int + +const ( + + // Ok represents a successful operation with an HTTP status code of 200. + Ok DealCode = 200 + + // ErrUnAuthorized represents an error indicating unauthorized access with the code 401. + ErrUnAuthorized DealCode = 401 + + // ErrBadProposal represents a validation error that indicates an invalid or malformed proposal input in the context of validation logic. + ErrBadProposal DealCode = 400 + + // ErrDealNotFound indicates that the specified deal could not be found, corresponding to the HTTP status code 404. + ErrDealNotFound DealCode = 404 + + // ErrMalformedDataSource indicates that the provided data source is incorrectly formatted or contains invalid data. + ErrMalformedDataSource DealCode = 430 + + // ErrUnsupportedDataSource indicates the specified data source is not supported or disabled for use in the current context. + ErrUnsupportedDataSource DealCode = 422 + + // ErrUnsupportedProduct indicates that the requested product is not supported by the provider. + ErrUnsupportedProduct DealCode = 423 + + // ErrProductNotEnabled indicates that the requested product is not enabled on the provider. + ErrProductNotEnabled DealCode = 424 + + // ErrProductValidationFailed indicates a failure during product-specific validation due to invalid or missing data. + ErrProductValidationFailed DealCode = 425 + + // ErrDealRejectedByMarket indicates that a proposed deal was rejected by the market for not meeting its acceptance criteria or rules. + ErrDealRejectedByMarket DealCode = 426 + + // ErrServerInternalError indicates an internal server error with a corresponding error code of 500. + ErrServerInternalError DealCode = 500 + + // ErrServiceMaintenance indicates that the service is temporarily unavailable due to maintenance, corresponding to HTTP status code 503. + ErrServiceMaintenance DealCode = 503 + + // ErrServiceOverloaded indicates that the service is overloaded and cannot process the request at the moment. + ErrServiceOverloaded DealCode = 429 + + // ErrMarketNotEnabled indicates that the market is not enabled for the requested operation. + ErrMarketNotEnabled DealCode = 440 + + // ErrDurationTooShort indicates that the provided duration value does not meet the minimum required threshold. + ErrDurationTooShort DealCode = 441 +) + +// ProductName represents a type for defining the product name identifier used in various operations and validations. +type ProductName string + +const ( + // ProductNameDDOV1 represents the identifier for the "ddo_v1" product used in contract operations and validations. + ProductNameDDOV1 ProductName = "ddo_v1" + ProductNamePDPV1 ProductName = "pdp_v1" + ProductNameRetrievalV1 ProductName = "retrieval_v1" +) + +type DataSourceName string + +const ( + DataSourceNameHTTP DataSourceName = "http" + DataSourceNameAggregate DataSourceName = "aggregate" + DataSourceNameOffline DataSourceName = "offline" + DataSourceNamePut DataSourceName = "put" +) + +type product interface { + Validate(db *harmonydb.DB, cfg *config.MK20Config) (DealCode, error) + ProductName() ProductName +} + +// UploadStatusCode defines the return codes for the upload status +type UploadStatusCode int + +const ( + + // UploadStatusCodeOk represents a successful upload operation with status code 200. + UploadStatusCodeOk UploadStatusCode = 200 + + // UploadStatusCodeDealNotFound indicates that the requested deal was not found, corresponding to status code 404. + UploadStatusCodeDealNotFound UploadStatusCode = 404 + + // UploadStatusCodeUploadNotStarted indicates that the upload process has not started yet. + UploadStatusCodeUploadNotStarted UploadStatusCode = 425 + + // UploadStatusCodeServerError indicates an internal server error occurred during the upload process, corresponding to status code 500. + UploadStatusCodeServerError UploadStatusCode = 500 +) + +// UploadStartCode represents an integer type for return codes related to the upload start process. +type UploadStartCode int + +const ( + + // UploadStartCodeOk indicates a successful upload start request with status code 200. + UploadStartCodeOk UploadStartCode = 200 + + // UploadStartCodeBadRequest indicates a bad upload start request error with status code 400. + UploadStartCodeBadRequest UploadStartCode = 400 + + // UploadStartCodeDealNotFound represents a 404 status indicating the deal was not found during the upload start process. + UploadStartCodeDealNotFound UploadStartCode = 404 + + // UploadStartCodeAlreadyStarted indicates that the upload process has already been initiated and cannot be started again. + UploadStartCodeAlreadyStarted UploadStartCode = 409 + + // UploadStartCodeServerError indicates an error occurred on the server while processing an upload start request. + UploadStartCodeServerError UploadStartCode = 500 +) + +// UploadCode represents return codes related to upload operations, typically based on HTTP status codes. +type UploadCode int + +const ( + + // UploadOk indicates a successful upload operation, represented by the HTTP status code 200. + UploadOk UploadCode = 200 + + // UploadBadRequest represents a bad request error with an HTTP status code of 400. + UploadBadRequest UploadCode = 400 + + // UploadNotFound represents an error where the requested upload chunk could not be found, typically corresponding to HTTP status 404. + UploadNotFound UploadCode = 404 + + // UploadChunkAlreadyUploaded indicates that the chunk has already been uploaded and cannot be re-uploaded. + UploadChunkAlreadyUploaded UploadCode = 409 + + // UploadServerError indicates a server-side error occurred during the upload process, represented by the HTTP status code 500. + UploadServerError UploadCode = 500 + + // UploadRateLimit indicates that the upload operation is being rate-limited, corresponding to the HTTP status code 429. + UploadRateLimit UploadCode = 429 +) diff --git a/market/mk20/types_test.go b/market/mk20/types_test.go new file mode 100644 index 000000000..3d65b9341 --- /dev/null +++ b/market/mk20/types_test.go @@ -0,0 +1,298 @@ +package mk20 + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/ipfs/go-cid" + "github.com/oklog/ulid" + "github.com/stretchr/testify/require" +) + +func mustCID(t *testing.T, s string) cid.Cid { + t.Helper() + c, err := cid.Parse(s) + if err != nil { + t.Fatalf("parse cid: %v", err) + } + return c +} + +func mustULID(t *testing.T, s string) ulid.ULID { + t.Helper() + id, err := ulid.Parse(s) + if err != nil { + t.Fatalf("parse ulid: %v", err) + } + return id +} + +func TestDeal_MarshalUnmarshal_Minimal(t *testing.T) { + orig := Deal{ + Identifier: mustULID(t, "01ARZ3NDEKTSV4RRFFQ69G5FAV"), + Client: "f1abcclient", + // Data omitted (omitempty) + // Products is empty struct; inner fields are omitempty + } + + b, err := json.Marshal(orig) + if err != nil { + t.Fatalf("marshal: %v", err) + } + + // Expect "data" to be absent and "products" to be an empty object + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + t.Fatalf("unmarshal into map: %v", err) + } + if _, ok := m["data"]; ok { + t.Fatalf("expected 'data' to be omitted, found present") + } + if p, ok := m["products"]; !ok { + t.Fatalf("expected 'products' present") + } else if obj, ok := p.(map[string]any); !ok || len(obj) != 0 { + t.Fatalf("expected 'products' to be empty object, got: %#v", p) + } + + var round Deal + if err := json.Unmarshal(b, &round); err != nil { + t.Fatalf("round unmarshal: %v", err) + } + + if !reflect.DeepEqual(orig, round) { + t.Fatalf("round trip mismatch:\norig: %#v\nround:%#v", orig, round) + } +} + +func TestHttpHeaderRoundTrip(t *testing.T) { + orig := http.Header{ + "X-Trace-Id": []string{"abc123"}, + "Cache-Control": []string{"no-cache", "private"}, + } + b, err := json.Marshal(orig) + if err != nil { + t.Fatalf("marshal: %v", err) + } + t.Logf("marshaled JSON: %s", string(b)) + + var round http.Header + if err := json.Unmarshal(b, &round); err != nil { + t.Fatalf("unmarshal: %v", err) + } + t.Logf("unmarshaled Struct: %+v", round) + v := round.Values("Cache-Control") + require.Equal(t, 2, len(v)) + require.Equal(t, "no-cache", v[0]) + require.Equal(t, "private", v[1]) + v = round.Values("X-Trace-Id") + require.Equal(t, 1, len(v)) + require.Equal(t, "abc123", v[0]) +} + +func TestDeal_HTTPSourceWithHeaders(t *testing.T) { + orig := Deal{ + Identifier: mustULID(t, "01ARZ3NDEKTSV4RRFFQ69G5FAV"), + Client: "f1client", + Data: &DataSource{ + PieceCID: mustCID(t, "bafkzcibfxx3meais3xzh6qn56y6hiasmrufhegoweu3o5ccofs74nfdfr4yn76pqz4pq"), + Format: PieceDataFormat{Car: &FormatCar{}}, + SourceHTTP: &DataSourceHTTP{ + URLs: []HttpUrl{ + { + URL: "https://example.com/piece/xyz", + Headers: http.Header{"X-Trace-Id": []string{"abc123"}, "Cache-Control": []string{"no-cache", "private"}}, + Priority: 10, + Fallback: false, + }, + { + URL: "http://127.0.0.1:8080/piece/xyz", + Headers: http.Header{}, // empty headers should round-trip + Priority: 20, + Fallback: true, + }, + }, + }, + }, + Products: Products{}, // explicit empty + } + + b, err := json.Marshal(orig) + if err != nil { + t.Fatalf("marshal: %v", err) + } + + var round Deal + if err := json.Unmarshal(b, &round); err != nil { + t.Fatalf("unmarshal: %v", err) + } + + if !reflect.DeepEqual(orig, round) { + t.Fatalf("round trip mismatch:\norig: %#v\nround:%#v", orig, round) + } + + // Spot-check headers survived correctly + gotHdr := round.Data.SourceHTTP.URLs[0].Headers + if v := gotHdr.Get("X-Trace-ID"); v != "abc123" { + t.Fatalf("expected X-Trace-ID=abc123, got %q", v) + } +} + +func TestDeal_Aggregate_NoSub_vs_EmptySub(t *testing.T) { + // Case A: Aggregate.Sub is nil (no omitempty on Sub), expected to marshal as "sub": null + withNil := Deal{ + Identifier: mustULID(t, "01ARZ3NDEKTSV4RRFFQ69G5FAV"), + Client: "f1client", + Data: &DataSource{ + PieceCID: mustCID(t, "bafkzcibfxx3meais3xzh6qn56y6hiasmrufhegoweu3o5ccofs74nfdfr4yn76pqz4pq"), + Format: PieceDataFormat{ + Aggregate: &FormatAggregate{ + Type: AggregateTypeV1, + Sub: nil, // important + }, + }, + }, + } + + bNil, err := json.Marshal(withNil) + if err != nil { + t.Fatalf("marshal nil-sub: %v", err) + } + var objNil map[string]any + _ = json.Unmarshal(bNil, &objNil) // ignore error; presence check is all we need + // Navigate: data.format.aggregate.sub should be null + dataMap := objNil["data"].(map[string]any) + format := dataMap["format"].(map[string]any) + agg := format["aggregate"].(map[string]any) + if _, ok := agg["sub"]; !ok { + t.Fatalf("expected aggregate.sub to be present (as null) when Sub == nil") + } + if agg["sub"] != nil { + t.Fatalf("expected aggregate.sub == null; got: %#v", agg["sub"]) + } + + // Case B: Aggregate.Sub is empty slice, expected to marshal as "sub": [] + withEmpty := withNil + withEmpty.Data.Format.Aggregate.Sub = []DataSource{} + + bEmpty, err := json.Marshal(withEmpty) + if err != nil { + t.Fatalf("marshal empty-sub: %v", err) + } + var objEmpty map[string]any + _ = json.Unmarshal(bEmpty, &objEmpty) + dataMap = objEmpty["data"].(map[string]any) + format = dataMap["format"].(map[string]any) + agg = format["aggregate"].(map[string]any) + arr, ok := agg["sub"].([]any) + if !ok { + t.Fatalf("expected aggregate.sub to be [] when Sub == empty slice; got %#v", agg["sub"]) + } + if len(arr) != 0 { + t.Fatalf("expected empty array for sub; got len=%d", len(arr)) + } +} + +func TestDeal_Aggregate_WithSubpieces_RoundTrip(t *testing.T) { + // Two subpieces: one Raw, one Car + sub1 := DataSource{ + PieceCID: mustCID(t, "bafkzcibfxx3meais3xzh6qn56y6hiasmrufhegoweu3o5ccofs74nfdfr4yn76pqz4pq"), + Format: PieceDataFormat{Raw: &FormatBytes{}}, + SourceOffline: &DataSourceOffline{}, // ensure additional fields survive + } + sub2 := DataSource{ + PieceCID: mustCID(t, "bafkzcibfxx3meais3xzh6qn56y6hiasmrufhegoweu3o5ccofs74nfdfr4yn76pqz4pd"), + Format: PieceDataFormat{Car: &FormatCar{}}, + } + + orig := Deal{ + Identifier: mustULID(t, "01ARZ3NDEKTSV4RRFFQ69G5FAV"), + Client: "f1client", + Data: &DataSource{ + PieceCID: mustCID(t, "bafkzcibfxx3meais3xzh6qn56y6hiasmrufhegoweu3o5ccofs74nfdfr4yn76pqz4pe"), + Format: PieceDataFormat{ + Aggregate: &FormatAggregate{ + Type: AggregateTypeV1, + Sub: []DataSource{sub1, sub2}, + }, + }, + SourceAggregate: &DataSourceAggregate{Pieces: []DataSource{sub1, sub2}}, + }, + Products: Products{ + // exercise omitempty pointers: all nil + }, + } + + b, err := json.Marshal(orig) + if err != nil { + t.Fatalf("marshal: %v", err) + } + + var round Deal + if err := json.Unmarshal(b, &round); err != nil { + t.Fatalf("unmarshal: %v", err) + } + + // Order must be preserved + if len(round.Data.Format.Aggregate.Sub) != 2 { + t.Fatalf("expected 2 subpieces, got %d", len(round.Data.Format.Aggregate.Sub)) + } + if round.Data.Format.Aggregate.Sub[0].PieceCID.String() != sub1.PieceCID.String() { + t.Fatalf("subpiece[0] order changed") + } + + if !reflect.DeepEqual(orig, round) { + t.Fatalf("round trip mismatch:\norig: %#v\nround:%#v", orig, round) + } +} + +func TestDeal_Products_OmitEmptyInnerFields(t *testing.T) { + // All product pointers nil -> products should marshal as {} + orig := Deal{ + Identifier: mustULID(t, "01ARZ3NDEKTSV4RRFFQ69G5FAV"), + Client: "f1client", + Products: Products{}, + } + + b, err := json.Marshal(orig) + if err != nil { + t.Fatalf("marshal: %v", err) + } + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + t.Fatalf("unmarshal map: %v", err) + } + p, ok := m["products"] + if !ok { + t.Fatalf("products missing") + } + if obj, ok := p.(map[string]any); !ok || len(obj) != 0 { + t.Fatalf("expected products to be {}, got %#v", p) + } + + var round Deal + if err := json.Unmarshal(b, &round); err != nil { + t.Fatalf("round unmarshal: %v", err) + } + if !reflect.DeepEqual(orig.Products, round.Products) { + t.Fatalf("products changed on round trip: %#v -> %#v", orig.Products, round.Products) + } +} + +func TestPartialUnmarshal(t *testing.T) { + //iString := "{\"client\":\"t1k7ctd3hvmwwjdpb2ipd3kr7n4vk3xzfvzbbdrai\",\"products\":{\"pdp_v1\":{\"create_data_set\":true,\"add_piece\":true,\"record_keeper\":\"0x158c8f05A616403589b99BE5d82d756860363A92\"}}}" + iString := "{\"client\":\"t1k7ctd3hvmwwjdpb2ipd3kr7n4vk3xzfvzbbdrai\",\"data\":{\"format\":{\"raw\":{}},\"piece_cid\":\"bafkzcibfxx3meais7dgqlg24253d7s2unmxkczzlrnsoni6zmvjy6vi636nslfyggu3q\",\"source_http_put\":{}},\"identifier\":\"01K4R3EK6QEPASQH8KFPKVBNWR\",\"products\":{\"pdp_v1\":{\"add_piece\":true,\"delete_data_set\":false,\"delete_piece\":false,\"extra_data\":[],\"record_keeper\":\"0x158c8f05A616403589b99BE5d82d756860363A92\"},\"retrieval_v1\":{\"announce_payload\":true,\"announce_piece\":true,\"indexing\":true}}}" + var deal Deal + if err := json.Unmarshal([]byte(iString), &deal); err != nil { + t.Fatal(err) + } + fmt.Printf("%+v\n", deal) + require.NotNil(t, deal) + require.NotNil(t, deal.Products) + require.NotNil(t, deal.Products.PDPV1) + require.Equal(t, true, deal.Products.PDPV1.CreateDataSet) + require.Equal(t, true, deal.Products.PDPV1.AddPiece) + require.Equal(t, "0x158c8f05A616403589b99BE5d82d756860363A92", deal.Products.PDPV1.RecordKeeper) +} diff --git a/market/mk20/utils.go b/market/mk20/utils.go new file mode 100644 index 000000000..c686ac053 --- /dev/null +++ b/market/mk20/utils.go @@ -0,0 +1,1017 @@ +package mk20 + +import ( + "bytes" + "context" + "crypto/ed25519" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "runtime" + "runtime/debug" + "strings" + "time" + + "github.com/ipfs/go-cid" + "github.com/mr-tron/base58" + "github.com/oklog/ulid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/abi" + fcrypto "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/commcidv2" + + "github.com/filecoin-project/lotus/lib/sigs" +) + +func (d *Deal) Validate(db *harmonydb.DB, cfg *config.MK20Config, Auth string) (DealCode, error) { + defer func() { + if r := recover(); r != nil { + trace := make([]byte, 1<<16) + n := runtime.Stack(trace, false) + log.Errorf("panic occurred in validation: %v\n%s", r, trace[:n]) + debug.PrintStack() + } + }() + + err := validateClient(d.Client, Auth) + if err != nil { + return ErrBadProposal, err + } + + code, err := d.Products.Validate(db, cfg) + if err != nil { + return code, xerrors.Errorf("products validation failed: %w", err) + } + + // Validate data if present + if d.Data != nil { + return d.Data.Validate(db) + } + + // Return without validating data for initial phase of /Put deals or PDP Delete deals + return Ok, nil +} + +func validateClient(client string, auth string) error { + if client == "" { + return xerrors.Errorf("client is empty") + } + + keyType, pubKey, _, err := parseCustomAuth(auth) + if err != nil { + return xerrors.Errorf("parsing auth header: %w", err) + } + + switch keyType { + case "ed25519": + kStr, err := ED25519ToString(pubKey) + if err != nil { + return xerrors.Errorf("invalid public key for auth header: %w", err) + } + if client != kStr { + return xerrors.Errorf("client in deal does not match client in auth header") + } + return nil + case "secp256k1", "bls", "delegated": + addr, err := address.NewFromBytes(pubKey) + if err != nil { + return xerrors.Errorf("invalid public key for auth header: %w", err) + } + if client != addr.String() { + return xerrors.Errorf("client in deal does not match client in auth header") + } + return nil + default: + return fmt.Errorf("unsupported key type: %s", keyType) + } +} + +func (d DataSource) Validate(db *harmonydb.DB) (DealCode, error) { + + err := ValidatePieceCID(d.PieceCID) + if err != nil { + return ErrBadProposal, err + } + + if d.SourceOffline != nil && d.SourceHTTP != nil && d.SourceAggregate != nil && d.SourceHttpPut != nil { + return ErrBadProposal, xerrors.Errorf("multiple sources defined for data source") + } + + if d.SourceOffline == nil && d.SourceHTTP == nil && d.SourceAggregate == nil && d.SourceHttpPut == nil { + return ErrBadProposal, xerrors.Errorf("no source defined for data source") + } + + var fcar, fagg, fraw bool + + if d.Format.Car != nil { + fcar = true + } + + if d.Format.Aggregate != nil { + fagg = true + + if d.Format.Aggregate.Type != AggregateTypeV1 { + return ErrMalformedDataSource, xerrors.Errorf("aggregate type not supported") + } + + // If client will supply individual pieces + if d.SourceAggregate != nil { + code, err := IsDataSourceEnabled(db, d.SourceAggregate.Name()) + if err != nil { + return code, err + } + + if len(d.SourceAggregate.Pieces) == 0 { + return ErrMalformedDataSource, xerrors.Errorf("no pieces in aggregate") + } + + if len(d.SourceAggregate.Pieces) == 1 { + return ErrMalformedDataSource, xerrors.Errorf("aggregate must have at least 2 pieces") + } + + for _, p := range d.SourceAggregate.Pieces { + err := ValidatePieceCID(p.PieceCID) + if err != nil { + return ErrMalformedDataSource, xerrors.Errorf("invalid piece cid") + } + + var ifcar, ifraw bool + + if p.Format.Car != nil { + ifcar = true + } + + if p.Format.Aggregate != nil { + return ErrMalformedDataSource, xerrors.Errorf("aggregate of aggregate is not supported") + } + + if p.Format.Raw != nil { + ifraw = true + } + + if !ifcar && !ifraw { + return ErrMalformedDataSource, xerrors.Errorf("no format defined for sub piece in aggregate") + } + + if ifcar && ifraw { + return ErrMalformedDataSource, xerrors.Errorf("multiple formats defined for sub piece in aggregate") + } + + if p.SourceAggregate != nil { + return ErrMalformedDataSource, xerrors.Errorf("aggregate of aggregate is not supported") + } + + if p.SourceOffline == nil && p.SourceHTTP == nil { + return ErrMalformedDataSource, xerrors.Errorf("no source defined for sub piece in aggregate") + } + + if p.SourceOffline != nil && p.SourceHTTP != nil { + return ErrMalformedDataSource, xerrors.Errorf("multiple sources defined for sub piece in aggregate") + } + + if p.SourceHTTP != nil { + if len(p.SourceHTTP.URLs) == 0 { + return ErrMalformedDataSource, xerrors.Errorf("no urls defined for sub piece in aggregate") + } + + for _, u := range p.SourceHTTP.URLs { + _, err := url.Parse(u.URL) + if err != nil { + return ErrMalformedDataSource, xerrors.Errorf("invalid url") + } + } + } + } + if len(d.Format.Aggregate.Sub) > 0 { + return ErrMalformedDataSource, xerrors.Errorf("sub pieces cannot be defined when dataSource is aggregate") + } + } else { + // If client will supply pre-aggregated piece + if len(d.Format.Aggregate.Sub) == 0 { + return ErrMalformedDataSource, xerrors.Errorf("no sub pieces defined under aggregate") + } + for _, p := range d.Format.Aggregate.Sub { + err := ValidatePieceCID(p.PieceCID) + if err != nil { + return ErrMalformedDataSource, xerrors.Errorf("invalid piece cid") + } + var ifcar, ifraw bool + if p.Format.Car != nil { + ifcar = true + } + + if p.Format.Aggregate != nil { + return ErrMalformedDataSource, xerrors.Errorf("aggregate of aggregate is not supported") + } + + if p.Format.Raw != nil { + ifraw = true + } + if !ifcar && !ifraw { + return ErrMalformedDataSource, xerrors.Errorf("no format defined for sub piece in aggregate") + } + if ifcar && ifraw { + return ErrMalformedDataSource, xerrors.Errorf("multiple formats defined for sub piece in aggregate") + } + if p.SourceAggregate != nil || p.SourceOffline != nil || p.SourceHTTP != nil || p.SourceHttpPut != nil { + return ErrMalformedDataSource, xerrors.Errorf("sub piece of pre-aggregated piece cannot have source defined") + } + } + } + } + + if d.Format.Raw != nil { + fraw = true + } + + if !fcar && !fagg && !fraw { + return ErrBadProposal, xerrors.Errorf("no format defined") + } + + if fcar && fagg || fcar && fraw || fagg && fraw { + return ErrBadProposal, xerrors.Errorf("multiple formats defined") + } + + if d.SourceHTTP != nil { + code, err := IsDataSourceEnabled(db, d.SourceHTTP.Name()) + if err != nil { + return code, err + } + + if len(d.SourceHTTP.URLs) == 0 { + return ErrMalformedDataSource, xerrors.Errorf("no urls defined") + } + + for _, u := range d.SourceHTTP.URLs { + _, err := url.Parse(u.URL) + if err != nil { + return ErrMalformedDataSource, xerrors.Errorf("invalid url") + } + } + } + + if d.SourceOffline != nil { + code, err := IsDataSourceEnabled(db, d.SourceOffline.Name()) + if err != nil { + return code, err + } + } + + if d.SourceHttpPut != nil { + code, err := IsDataSourceEnabled(db, d.SourceHttpPut.Name()) + if err != nil { + return code, err + } + } + + return Ok, nil +} + +func ValidatePieceCID(c cid.Cid) error { + if !c.Defined() { + return xerrors.Errorf("piece cid is not defined") + } + + if c.Prefix().Codec != cid.Raw { + return xerrors.Errorf("piece cid is not raw") + } + + commp, err := commcidv2.CommPFromPCidV2(c) + if err != nil { + return xerrors.Errorf("invalid piece cid: %w", err) + } + + if commp.PieceInfo().Size == 0 { + return xerrors.Errorf("piece size is 0") + } + + if commp.PayloadSize() == 0 { + return xerrors.Errorf("payload size is 0") + } + + if padreader.PaddedSize(commp.PayloadSize()).Padded() != commp.PieceInfo().Size { + return xerrors.Errorf("invalid piece size") + } + + return nil +} + +type PieceInfo struct { + PieceCIDV1 cid.Cid `json:"piece_cid"` + Size abi.PaddedPieceSize `json:"size"` + RawSize uint64 `json:"raw_size"` +} + +func (d *Deal) RawSize() (uint64, error) { + if d.Data == nil { + return 0, xerrors.Errorf("no data") + } + commp, err := commcidv2.CommPFromPCidV2(d.Data.PieceCID) + if err != nil { + return 0, xerrors.Errorf("invalid piece cid: %w", err) + } + return commp.PayloadSize(), nil +} + +func (d *Deal) Size() (abi.PaddedPieceSize, error) { + if d.Data == nil { + return 0, xerrors.Errorf("no data") + } + commp, err := commcidv2.CommPFromPCidV2(d.Data.PieceCID) + if err != nil { + return 0, xerrors.Errorf("invalid piece cid: %w", err) + } + return commp.PieceInfo().Size, nil +} + +func (d *Deal) PieceInfo() (*PieceInfo, error) { + return GetPieceInfo(d.Data.PieceCID) +} + +func GetPieceInfo(c cid.Cid) (*PieceInfo, error) { + commp, err := commcidv2.CommPFromPCidV2(c) + if err != nil { + return nil, xerrors.Errorf("invalid piece cid: %w", err) + } + return &PieceInfo{ + PieceCIDV1: commp.PCidV1(), + Size: commp.PieceInfo().Size, + RawSize: commp.PayloadSize(), + }, nil +} + +func (d Products) Validate(db *harmonydb.DB, cfg *config.MK20Config) (DealCode, error) { + var nproducts int + if d.DDOV1 != nil { + nproducts++ + code, err := d.DDOV1.Validate(db, cfg) + if err != nil { + return code, err + } + if d.RetrievalV1 == nil { + return ErrProductValidationFailed, xerrors.Errorf("retrieval v1 is required for ddo v1") + } + if d.RetrievalV1.AnnouncePiece { + return ErrProductValidationFailed, xerrors.Errorf("announce piece is not supported for ddo v1") + } + } + if d.RetrievalV1 != nil { + code, err := d.RetrievalV1.Validate(db, cfg) + if err != nil { + return code, err + } + } + if d.PDPV1 != nil { + nproducts++ + code, err := d.PDPV1.Validate(db, cfg) + if err != nil { + return code, err + } + // TODO: Enable this once Indexing is done + //if d.RetrievalV1 == nil { + // return ErrProductValidationFailed, xerrors.Errorf("retrieval v1 is required for pdp v1") + //} + //if d.RetrievalV1.Indexing || d.RetrievalV1.AnnouncePayload { + // return ErrProductValidationFailed, xerrors.Errorf("payload indexing and announcement is not supported for pdp v1") + //} + } + + if nproducts == 0 { + return ErrProductValidationFailed, xerrors.Errorf("no products defined") + } + + if d.DDOV1 != nil && d.PDPV1 != nil { + return ErrProductValidationFailed, xerrors.Errorf("ddo_v1 and pdp_v1 are mutually exclusive") + } + + return Ok, nil +} + +type DBDDOV1 struct { + DDO *DDOV1 `json:"ddo"` + DealID int64 `json:"deal_id"` + Complete bool `json:"complete"` + Error string `json:"error"` +} + +type DBPDPV1 struct { + PDP *PDPV1 `json:"pdp"` + Complete bool `json:"complete"` + Error string `json:"error"` +} + +type DBDeal struct { + Identifier string `db:"id"` + Client string `db:"client"` + PieceCIDV2 string `db:"piece_cid_v2"` + Data json.RawMessage `db:"data"` + DDOv1 json.RawMessage `db:"ddo_v1"` + RetrievalV1 json.RawMessage `db:"retrieval_v1"` + PDPV1 json.RawMessage `db:"pdp_v1"` +} + +func (d *Deal) ToDBDeal() (*DBDeal, error) { + ddeal := DBDeal{ + Identifier: d.Identifier.String(), + Client: d.Client, + } + + if d.Data != nil { + dataBytes, err := json.Marshal(d.Data) + if err != nil { + return nil, fmt.Errorf("marshal data: %w", err) + } + ddeal.PieceCIDV2 = d.Data.PieceCID.String() + ddeal.Data = dataBytes + } else { + ddeal.Data = []byte("null") + } + + if d.Products.DDOV1 != nil { + dddov1 := DBDDOV1{ + DDO: d.Products.DDOV1, + } + ddov1, err := json.Marshal(dddov1) + if err != nil { + return nil, fmt.Errorf("marshal ddov1: %w", err) + } + ddeal.DDOv1 = ddov1 + } else { + ddeal.DDOv1 = []byte("null") + } + + if d.Products.RetrievalV1 != nil { + rev, err := json.Marshal(d.Products.RetrievalV1) + if err != nil { + return nil, fmt.Errorf("marshal retrievalv1: %w", err) + } + ddeal.RetrievalV1 = rev + } else { + ddeal.RetrievalV1 = []byte("null") + } + + if d.Products.PDPV1 != nil { + dbpdpv1 := DBPDPV1{ + PDP: d.Products.PDPV1, + } + pdpv1, err := json.Marshal(dbpdpv1) + if err != nil { + return nil, fmt.Errorf("marshal pdpv1: %w", err) + } + ddeal.PDPV1 = pdpv1 + } else { + ddeal.PDPV1 = []byte("null") + } + + return &ddeal, nil +} + +func (d *Deal) SaveToDB(tx *harmonydb.Tx) error { + dbDeal, err := d.ToDBDeal() + if err != nil { + return xerrors.Errorf("to db deal: %w", err) + } + + var pieceCid interface{} + + if dbDeal.PieceCIDV2 != "" { + pieceCid = dbDeal.PieceCIDV2 + } else { + pieceCid = nil + } + + n, err := tx.Exec(`INSERT INTO market_mk20_deal (id, client, piece_cid_v2, data, ddo_v1, retrieval_v1, pdp_v1) + VALUES ($1, $2, $3, $4, $5, $6, $7)`, + dbDeal.Identifier, + dbDeal.Client, + pieceCid, + dbDeal.Data, + dbDeal.DDOv1, + dbDeal.RetrievalV1, + dbDeal.PDPV1) + if err != nil { + return xerrors.Errorf("insert deal: %w", err) + } + if n != 1 { + return xerrors.Errorf("insert deal: expected 1 row affected, got %d", n) + } + return nil +} + +func (d *Deal) UpdateDealWithTx(tx *harmonydb.Tx) error { + dbDeal, err := d.ToDBDeal() + if err != nil { + return xerrors.Errorf("to db deal: %w", err) + } + + var pieceCid interface{} + + if dbDeal.PieceCIDV2 != "" { + pieceCid = dbDeal.PieceCIDV2 + } else { + pieceCid = nil + } + + n, err := tx.Exec(`UPDATE market_mk20_deal SET + piece_cid_v2 = $1, + data = $2, + ddo_v1 = $3, + retrieval_v1 = $4, + pdp_v1 = $5`, pieceCid, dbDeal.Data, dbDeal.DDOv1, dbDeal.RetrievalV1, dbDeal.PDPV1) + if err != nil { + return xerrors.Errorf("update deal: %w", err) + } + if n != 1 { + return xerrors.Errorf("update deal: expected 1 row affected, got %d", n) + } + return nil +} + +func (d *Deal) UpdateDeal(tx *harmonydb.Tx) error { + dbDeal, err := d.ToDBDeal() + if err != nil { + return xerrors.Errorf("to db deal: %w", err) + } + + var pieceCid interface{} + + if dbDeal.PieceCIDV2 != "" { + pieceCid = dbDeal.PieceCIDV2 + } else { + pieceCid = nil + } + + n, err := tx.Exec(`UPDATE market_mk20_deal SET + piece_cid_v2 = $1, + data = $2, + ddo_v1 = $3, + retrieval_v1 = $4, + pdp_v1 = $5`, pieceCid, dbDeal.Data, dbDeal.DDOv1, dbDeal.RetrievalV1, dbDeal.PDPV1) + if err != nil { + return xerrors.Errorf("update deal: %w", err) + } + if n != 1 { + return xerrors.Errorf("update deal: expected 1 row affected, got %d", n) + } + return nil +} + +func DealFromTX(tx *harmonydb.Tx, id ulid.ULID) (*Deal, error) { + var dbDeal []DBDeal + err := tx.Select(&dbDeal, `SELECT + id, + client, + data, + ddo_v1, + retrieval_v1, + pdp_v1 FROM market_mk20_deal WHERE id = $1`, id.String()) + if err != nil { + return nil, xerrors.Errorf("getting deal from DB: %w", err) + } + if len(dbDeal) != 1 { + return nil, xerrors.Errorf("expected 1 deal, got %d", len(dbDeal)) + } + return dbDeal[0].ToDeal() +} + +func DealFromDB(ctx context.Context, db *harmonydb.DB, id ulid.ULID) (*Deal, error) { + var dbDeal []DBDeal + err := db.Select(ctx, &dbDeal, `SELECT + id, + client, + data, + ddo_v1, + retrieval_v1, + pdp_v1 FROM market_mk20_deal WHERE id = $1`, id.String()) + if err != nil { + return nil, xerrors.Errorf("getting deal from DB: %w", err) + } + if len(dbDeal) != 1 { + return nil, xerrors.Errorf("expected 1 deal, got %d", len(dbDeal)) + } + return dbDeal[0].ToDeal() +} + +func (d *DBDeal) ToDeal() (*Deal, error) { + var deal Deal + + if len(d.Data) > 0 && string(d.Data) != "null" { + var ds DataSource + if err := json.Unmarshal(d.Data, &ds); err != nil { + return nil, fmt.Errorf("unmarshal data: %w", err) + } + deal.Data = &ds + } + + if len(d.DDOv1) > 0 && string(d.DDOv1) != "null" { + var dddov1 DBDDOV1 + if err := json.Unmarshal(d.DDOv1, &dddov1); err != nil { + return nil, fmt.Errorf("unmarshal ddov1: %w", err) + } + deal.Products.DDOV1 = dddov1.DDO + } + + if len(d.RetrievalV1) > 0 && string(d.RetrievalV1) != "null" { + var rev RetrievalV1 + if err := json.Unmarshal(d.RetrievalV1, &rev); err != nil { + return nil, fmt.Errorf("unmarshal retrievalv1: %w", err) + } + deal.Products.RetrievalV1 = &rev + } + + if len(d.PDPV1) > 0 && string(d.PDPV1) != "null" { + var dddov1 DBPDPV1 + if err := json.Unmarshal(d.PDPV1, &dddov1); err != nil { + return nil, fmt.Errorf("unmarshal pdpv1: %w", err) + } + deal.Products.PDPV1 = dddov1.PDP + } + + id, err := ulid.Parse(d.Identifier) + if err != nil { + return nil, fmt.Errorf("parse id: %w", err) + } + deal.Identifier = id + + deal.Client = d.Client + + return &deal, nil +} + +func DBDealsToDeals(deals []*DBDeal) ([]*Deal, error) { + var result []*Deal + for _, d := range deals { + deal, err := d.ToDeal() + if err != nil { + return nil, err + } + result = append(result, deal) + } + return result, nil +} + +type ProviderDealRejectionInfo struct { + HTTPCode DealCode + Reason string +} + +// DealStatusResponse represents the response of a deal's status, including its current state and an optional error message. +type DealStatusResponse struct { + + // State indicates the current processing state of the deal as a DealState value. + State DealState `json:"status"` + + // ErrorMsg is an optional field containing error details associated with the deal's current state if an error occurred. + ErrorMsg string `json:"errorMsg"` +} + +// DealProductStatusResponse represents the status response for deal products with their respective deal statuses. +type DealProductStatusResponse struct { + + // DDOV1 holds the DealStatusResponse for product "ddo_v1". + DDOV1 *DealStatusResponse `json:"ddo_v1,omitempty"` + + // PDPV1 represents the DealStatusResponse for the product pdp_v1. + PDPV1 *DealStatusResponse `json:"pdp_v1,omitempty"` +} + +// DealStatus represents the status of a deal, including the HTTP code and an optional response detailing the deal's state and error message. +type DealStatus struct { + + // Response provides details about the deal's per product status, such as its current state and any associated error messages, if available. + Response *DealProductStatusResponse + + // HTTPCode represents the HTTP status code providing additional context about the deal status or possible errors. + HTTPCode int +} + +// DealState represents the current status of a deal in the system as a string value. +type DealState string + +const ( + + // DealStateAccepted represents the state where a deal has been accepted and is pending further processing in the system. + DealStateAccepted DealState = "accepted" + + // DealStateAwaitingUpload represents the state where a deal is awaiting file upload to proceed further in the process. + DealStateAwaitingUpload DealState = "uploading" + + // DealStateProcessing represents the state of a deal currently being processed in the pipeline. + DealStateProcessing DealState = "processing" + + // DealStateSealing indicates that the deal is currently being sealed in the system. + DealStateSealing DealState = "sealing" + + // DealStateIndexing represents the state where a deal is undergoing indexing in the system. + DealStateIndexing DealState = "indexing" + + // DealStateFailed indicates that the deal has failed due to an error during processing, sealing, or indexing. + DealStateFailed DealState = "failed" + + // DealStateComplete indicates that the deal has successfully completed all processing and is finalized in the system. + DealStateComplete DealState = "complete" +) + +// SupportedContracts represents a collection of contract addresses supported by a system or application. +type SupportedContracts struct { + // Contracts represents a list of supported contract addresses in string format. + Contracts []string `json:"contracts"` +} + +func NewULID() (ulid.ULID, error) { + return ulid.New(ulid.Timestamp(time.Now()), rand.Reader) +} + +func (dsh *DataSourceHTTP) Name() DataSourceName { + return DataSourceNameHTTP +} + +func (dso *DataSourceOffline) Name() DataSourceName { + return DataSourceNameOffline +} + +func (dsa *DataSourceAggregate) Name() DataSourceName { + return DataSourceNameAggregate +} + +func (dsh *DataSourceHttpPut) Name() DataSourceName { + return DataSourceNamePut +} + +func IsDataSourceEnabled(db *harmonydb.DB, name DataSourceName) (DealCode, error) { + var enabled bool + + err := db.QueryRow(context.Background(), `SELECT enabled FROM market_mk20_data_source WHERE name = $1`, name).Scan(&enabled) + if err != nil { + if !errors.Is(err, pgx.ErrNoRows) { + return http.StatusInternalServerError, xerrors.Errorf("data source %s is not enabled", name) + } + } + if !enabled { + return ErrUnsupportedDataSource, xerrors.Errorf("data source %s is not enabled", name) + } + return Ok, nil +} + +func IsProductEnabled(db *harmonydb.DB, name ProductName) (DealCode, error) { + var enabled bool + + err := db.QueryRow(context.Background(), `SELECT enabled FROM market_mk20_products WHERE name = $1`, name).Scan(&enabled) + if err != nil { + if !errors.Is(err, pgx.ErrNoRows) { + return http.StatusInternalServerError, xerrors.Errorf("data source %s is not enabled", name) + } + return ErrUnsupportedProduct, xerrors.Errorf("product %s is not supported by the provider", name) + } + if !enabled { + return ErrProductNotEnabled, xerrors.Errorf("product %s is not enabled", name) + } + return Ok, nil +} + +// SupportedProducts represents array of products supported by the SP. +type SupportedProducts struct { + // Contracts represents a list of supported contract addresses in string format. + Products []string `json:"products"` +} + +// SupportedDataSources represents array of dats sources supported by the SP. +type SupportedDataSources struct { + // Contracts represents a list of supported contract addresses in string format. + Sources []string `json:"sources"` +} + +// StartUpload represents metadata for initiating an upload operation. +type StartUpload struct { + + // RawSize indicates the total size of the data to be uploaded in bytes. + RawSize uint64 `json:"raw_size"` + + // ChunkSize defines the size of each data chunk to be used during the upload process. + ChunkSize int64 `json:"chunk_size"` +} + +// UploadStatus represents the status of a file upload process, including progress and missing chunks. +type UploadStatus struct { + + // TotalChunks represents the total number of chunks required for the upload. + TotalChunks int `json:"total_chunks"` + + // Uploaded represents the number of chunks successfully uploaded. + Uploaded int `json:"uploaded"` + + // Missing represents the number of chunks that are not yet uploaded. + Missing int `json:"missing"` + + // UploadedChunks is a slice containing the indices of successfully uploaded chunks. + UploadedChunks []int `json:"uploaded_chunks"` + + //MissingChunks is a slice containing the indices of missing chunks. + MissingChunks []int `json:"missing_chunks"` +} + +func UpdateDealDetails(ctx context.Context, db *harmonydb.DB, id ulid.ULID, deal *Deal, cfg *config.MK20Config, auth string) (*Deal, DealCode, []ProductName, error) { + ddeal, err := DealFromDB(ctx, db, id) + if err != nil { + return nil, ErrServerInternalError, nil, xerrors.Errorf("getting deal from DB: %w", err) + } + + // Run the following checks + // If Data details exist, do not update them + // If DDOV1 is defined then no update to it + // If PDPV1 is defined then no update to it + // If PDPv1 is defined by DDOV1 is not, then allow updating it + // If DDOV1 is defined then don't allow PDPv1 yet + + // TODO: Remove this once DDO is live + if ddeal.Products.PDPV1 != nil { + if ddeal.Data == nil { + ddeal.Data = deal.Data + } + return ddeal, Ok, nil, nil + } + + if ddeal.Data == nil { + ddeal.Data = deal.Data + } + + var newProducts []ProductName + + if ddeal.Products.DDOV1 == nil || deal.Products.DDOV1 != nil { + ddeal.Products.DDOV1 = deal.Products.DDOV1 + newProducts = append(newProducts, ProductNameDDOV1) + } + + if ddeal.Products.RetrievalV1 == nil || deal.Products.RetrievalV1 != nil { + ddeal.Products.RetrievalV1 = deal.Products.RetrievalV1 + newProducts = append(newProducts, ProductNameRetrievalV1) + } + + code, err := ddeal.Validate(db, cfg, auth) + if err != nil { + return nil, code, nil, xerrors.Errorf("validate deal: %w", err) + } + return ddeal, Ok, newProducts, nil +} + +func AuthenticateClient(db *harmonydb.DB, id, client string) (bool, error) { + var allowed bool + err := db.QueryRow(context.Background(), `SELECT EXISTS (SELECT 1 FROM market_mk20_deal WHERE id = $1 AND client = $2)`, id, client).Scan(&allowed) + if err != nil { + return false, xerrors.Errorf("querying client: %w", err) + } + return allowed, nil +} + +func clientAllowed(ctx context.Context, db *harmonydb.DB, client string, cfg *config.CurioConfig) (bool, error) { + if !cfg.Market.StorageMarketConfig.MK20.DenyUnknownClients { + return true, nil + } + + var allowed bool + err := db.QueryRow(ctx, `SELECT EXISTS (SELECT 1 FROM market_mk20_clients WHERE client = $1 AND allowed = TRUE)`, client).Scan(&allowed) + if err != nil { + return false, xerrors.Errorf("querying client: %w", err) + } + return allowed, nil +} + +const Authprefix = "CurioAuth " + +// Auth verifies the custom authentication header by parsing its contents and validating the signature using the provided database connection. +func Auth(header string, db *harmonydb.DB, cfg *config.CurioConfig) (bool, string, error) { + keyType, pubKey, sig, err := parseCustomAuth(header) + if err != nil { + return false, "", xerrors.Errorf("parsing auth header: %w", err) + } + return verifySignature(db, keyType, pubKey, sig, cfg) +} + +func parseCustomAuth(header string) (keyType string, pubKey, sig []byte, err error) { + + if !strings.HasPrefix(header, Authprefix) { + return "", nil, nil, errors.New("missing CustomAuth prefix") + } + + parts := strings.SplitN(strings.TrimPrefix(header, Authprefix), ":", 3) + if len(parts) != 3 { + return "", nil, nil, errors.New("invalid auth format") + } + + keyType = parts[0] + pubKey, err = base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + return "", nil, nil, fmt.Errorf("invalid pubkey base64: %w", err) + } + + if len(pubKey) == 0 { + return "", nil, nil, fmt.Errorf("invalid pubkey") + } + + sig, err = base64.StdEncoding.DecodeString(parts[2]) + if err != nil { + return "", nil, nil, fmt.Errorf("invalid signature base64: %w", err) + } + + if len(sig) == 0 { + return "", nil, nil, fmt.Errorf("invalid signature") + } + + return keyType, pubKey, sig, nil +} + +func verifySignature(db *harmonydb.DB, keyType string, pubKey, signature []byte, cfg *config.CurioConfig) (bool, string, error) { + now := time.Now().Truncate(time.Hour) + minus1 := now.Add(-59 * time.Minute) + plus1 := now.Add(59 * time.Minute) + timeStamps := []time.Time{now, minus1, plus1} + var msgs [][32]byte + + for _, t := range timeStamps { + msgs = append(msgs, sha256.Sum256(bytes.Join([][]byte{pubKey, []byte(t.Format(time.RFC3339))}, []byte{}))) + } + + switch keyType { + case "ed25519": + if len(pubKey) != ed25519.PublicKeySize || len(signature) != ed25519.SignatureSize { + return false, "", errors.New("invalid ed25519 sizes") + } + keyStr, err := ED25519ToString(pubKey) + if err != nil { + return false, "", xerrors.Errorf("invalid ed25519 pubkey: %w", err) + } + + allowed, err := clientAllowed(context.Background(), db, keyStr, cfg) + if err != nil { + return false, "", xerrors.Errorf("checking client allowed: %w", err) + } + if !allowed { + return false, "", nil + } + + for _, m := range msgs { + ok := ed25519.Verify(pubKey, m[:], signature) + if ok { + return true, keyStr, nil + } + } + return false, "", errors.New("invalid ed25519 signature") + + case "secp256k1", "bls", "delegated": + return verifyFilSignature(db, pubKey, signature, msgs, cfg) + default: + return false, "", fmt.Errorf("unsupported key type: %s", keyType) + } +} + +func verifyFilSignature(db *harmonydb.DB, pubKey, signature []byte, msgs [][32]byte, cfg *config.CurioConfig) (bool, string, error) { + signs := &fcrypto.Signature{} + err := signs.UnmarshalBinary(signature) + if err != nil { + return false, "", xerrors.Errorf("invalid signature") + } + addr, err := address.NewFromBytes(pubKey) + if err != nil { + return false, "", xerrors.Errorf("invalid filecoin pubkey") + } + + allowed, err := clientAllowed(context.Background(), db, addr.String(), cfg) + if err != nil { + return false, "", xerrors.Errorf("checking client allowed: %w", err) + } + if !allowed { + return false, "", nil + } + + for _, m := range msgs { + err = sigs.Verify(signs, addr, m[:]) + if err == nil { + return true, addr.String(), nil + } + } + + return false, "", errors.New("invalid signature") +} + +func ED25519ToString(pubKey []byte) (string, error) { + if len(pubKey) != ed25519.PublicKeySize { + return "", errors.New("invalid ed25519 pubkey size") + } + return base58.FastBase58Encoding(pubKey), nil +} + +func StringToED25519(addr string) ([]byte, error) { + return base58.FastBase58Decoding(addr) +} diff --git a/market/retrieval/piecehandler.go b/market/retrieval/piecehandler.go index 26f7c1c3b..fd5d6b0cf 100644 --- a/market/retrieval/piecehandler.go +++ b/market/retrieval/piecehandler.go @@ -12,8 +12,6 @@ import ( "github.com/ipfs/go-cid" "go.opencensus.io/stats" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/curio/lib/cachedreader" "github.com/filecoin-project/curio/market/retrieval/remoteblockstore" ) @@ -46,6 +44,20 @@ func (rp *Provider) handleByPieceCid(w http.ResponseWriter, r *http.Request) { return } + // For PDP metrics check if this is a PDP piece + var isPDP bool + err = rp.db.QueryRow(ctx, `SELECT EXISTS(SELECT 1 FROM pdp_piecerefs WHERE piece_cid = $1 LIMIT 1)`, pieceCid.String()).Scan(&isPDP) + if err != nil { + log.Errorf("failed to query the db for piece CID %s: %s", pieceCid, err) + w.WriteHeader(http.StatusInternalServerError) + stats.Record(ctx, remoteblockstore.HttpPieceByCid500ResponseCount.M(1)) + return + } + + if isPDP { + stats.Record(ctx, remoteblockstore.PDPPieceByCidRequestCount.M(1)) + } + // Get a reader over the piece reader, size, err := rp.cpr.GetSharedPieceReader(ctx, pieceCid) if err != nil { @@ -73,15 +85,22 @@ func (rp *Provider) handleByPieceCid(w http.ResponseWriter, r *http.Request) { return } - setHeaders(w, pieceCid, contentType) - serveContent(w, r, size, reader) + setHeaders(w, pieceCid, contentType, int64(size)) + serveContent(w, r, reader) - stats.Record(ctx, remoteblockstore.HttpPieceByCid200ResponseCount.M(1)) - stats.Record(ctx, remoteblockstore.HttpPieceByCidRequestDuration.M(float64(time.Since(startTime).Milliseconds()))) + if isPDP { + stats.Record(ctx, remoteblockstore.PDPPieceByCid200ResponseCount.M(1)) + stats.Record(ctx, remoteblockstore.PDPPieceByCidRequestDuration.M(float64(time.Since(startTime).Milliseconds()))) + stats.Record(ctx, remoteblockstore.PDPPieceBytesServedCount.M(int64(size))) + } else { + stats.Record(ctx, remoteblockstore.HttpPieceByCid200ResponseCount.M(1)) + stats.Record(ctx, remoteblockstore.HttpPieceByCidRequestDuration.M(float64(time.Since(startTime).Milliseconds()))) + } } -func setHeaders(w http.ResponseWriter, pieceCid cid.Cid, contentType string) { +func setHeaders(w http.ResponseWriter, pieceCid cid.Cid, contentType string, size int64) { w.Header().Set("Vary", "Accept-Encoding") + w.Header().Set("Content-Length", fmt.Sprintf("%d", size)) w.Header().Set("Cache-Control", "public, max-age=29030400, immutable") w.Header().Set("Content-Type", contentType) if contentType != "application/octet-stream" { @@ -98,7 +117,7 @@ func setHeaders(w http.ResponseWriter, pieceCid cid.Cid, contentType string) { } -func serveContent(res http.ResponseWriter, req *http.Request, size abi.UnpaddedPieceSize, content io.ReadSeeker) { +func serveContent(res http.ResponseWriter, req *http.Request, content io.ReadSeeker) { // Note that the last modified time is a constant value because the data // in a piece identified by a cid will never change. @@ -109,6 +128,5 @@ func serveContent(res http.ResponseWriter, req *http.Request, size abi.UnpaddedP } // Send the content - res.Header().Set("Content-Length", fmt.Sprintf("%d", size)) http.ServeContent(res, req, "", lastModified, content) } diff --git a/market/retrieval/remoteblockstore/metric.go b/market/retrieval/remoteblockstore/metric.go index 8c3ddba1a..865580cd0 100644 --- a/market/retrieval/remoteblockstore/metric.go +++ b/market/retrieval/remoteblockstore/metric.go @@ -28,13 +28,20 @@ var defaultMillisecondsDistribution = view.Distribution( var ( RetrievalInfo = stats.Int64("retrieval_info", "Arbitrary counter to tag node info to", stats.UnitDimensionless) - // piece + // piece (including PDP and sub pieces) HttpPieceByCidRequestCount = stats.Int64("http/piece_by_cid_request_count", "Counter of /piece/ requests", stats.UnitDimensionless) HttpPieceByCidRequestDuration = stats.Float64("http/piece_by_cid_request_duration_ms", "Time spent retrieving a piece by cid", stats.UnitMilliseconds) HttpPieceByCid200ResponseCount = stats.Int64("http/piece_by_cid_200_response_count", "Counter of /piece/ 200 responses", stats.UnitDimensionless) HttpPieceByCid400ResponseCount = stats.Int64("http/piece_by_cid_400_response_count", "Counter of /piece/ 400 responses", stats.UnitDimensionless) HttpPieceByCid404ResponseCount = stats.Int64("http/piece_by_cid_404_response_count", "Counter of /piece/ 404 responses", stats.UnitDimensionless) HttpPieceByCid500ResponseCount = stats.Int64("http/piece_by_cid_500_response_count", "Counter of /piece/ 500 responses", stats.UnitDimensionless) + + // pdp + PDPPieceByCidRequestCount = stats.Int64("pdp/piece_by_cid_request_count", "Counter of /piece/ requests for PDP", stats.UnitDimensionless) + PDPPieceByCidRequestDuration = stats.Float64("pdp/piece_by_cid_request_duration_ms", "Time spent retrieving a piece by cid for PDP", stats.UnitMilliseconds) + PDPPieceByCid200ResponseCount = stats.Int64("pdp/piece_by_cid_200_response_count", "Counter of /piece/ 200 responses for PDP", stats.UnitDimensionless) + PDPPieceBytesServedCount = stats.Int64("pdp/piece_bytes_served_count", "Counter of the number of bytes served by PDP since startup", stats.UnitBytes) + // Gateway HttpRblsGetRequestCount = stats.Int64("http/rbls_get_request_count", "Counter of RemoteBlockstore Get requests", stats.UnitDimensionless) HttpRblsGetSuccessResponseCount = stats.Int64("http/rbls_get_success_response_count", "Counter of successful RemoteBlockstore Get responses", stats.UnitDimensionless) @@ -74,6 +81,26 @@ var ( Aggregation: view.Count(), } + PDPPieceByCidRequestCountView = &view.View{ + Measure: PDPPieceByCidRequestCount, + Aggregation: view.Count(), + } + + PDPPieceByCidRequestDurationView = &view.View{ + Measure: PDPPieceByCidRequestDuration, + Aggregation: defaultMillisecondsDistribution, + } + + PDPPieceByCid200ResponseCountView = &view.View{ + Measure: PDPPieceByCid200ResponseCount, + Aggregation: view.Count(), + } + + PDPPieceBytesServedCountView = &view.View{ + Measure: PDPPieceBytesServedCount, + Aggregation: view.Count(), + } + HttpRblsGetRequestCountView = &view.View{ Measure: HttpRblsGetRequestCount, Aggregation: view.Count(), @@ -116,7 +143,7 @@ var ( } ) -// CacheViews groups all cache-related default views. +// RetrievalViews groups all retrieval-related default views. func init() { err := view.Register( HttpPieceByCidRequestCountView, @@ -125,6 +152,10 @@ func init() { HttpPieceByCid400ResponseCountView, HttpPieceByCid404ResponseCountView, HttpPieceByCid500ResponseCountView, + PDPPieceByCidRequestCountView, + PDPPieceByCidRequestDurationView, + PDPPieceByCid200ResponseCountView, + PDPPieceBytesServedCountView, HttpRblsGetRequestCountView, HttpRblsGetSuccessResponseCountView, HttpRblsGetFailResponseCountView, diff --git a/market/retrieval/remoteblockstore/remoteblockstore.go b/market/retrieval/remoteblockstore/remoteblockstore.go index e37bc4dac..82a35dde1 100644 --- a/market/retrieval/remoteblockstore/remoteblockstore.go +++ b/market/retrieval/remoteblockstore/remoteblockstore.go @@ -31,7 +31,7 @@ var log = logging.Logger("remote-blockstore") type idxAPI interface { PiecesContainingMultihash(ctx context.Context, m multihash.Multihash) ([]indexstore.PieceInfo, error) - GetOffset(ctx context.Context, pieceCid cid.Cid, hash multihash.Multihash) (uint64, error) + GetOffset(ctx context.Context, pieceCidv2 cid.Cid, hash multihash.Multihash) (uint64, error) } // RemoteBlockstore is a read-only blockstore over all cids across all pieces on a provider. @@ -115,8 +115,7 @@ func (ro *RemoteBlockstore) Get(ctx context.Context, c cid.Cid) (b blocks.Block, var merr error for _, piece := range pieces { data, err := func() ([]byte, error) { - // Get a reader over the piece data - reader, _, err := ro.cpr.GetSharedPieceReader(ctx, piece.PieceCid) + reader, _, err := ro.cpr.GetSharedPieceReader(ctx, piece.PieceCidV2) if err != nil { return nil, fmt.Errorf("getting piece reader: %w", err) } @@ -125,19 +124,19 @@ func (ro *RemoteBlockstore) Get(ctx context.Context, c cid.Cid) (b blocks.Block, }(reader) // Get the offset of the block within the piece (CAR file) - offset, err := ro.idxApi.GetOffset(ctx, piece.PieceCid, c.Hash()) + offset, err := ro.idxApi.GetOffset(ctx, piece.PieceCidV2, c.Hash()) if err != nil { - return nil, fmt.Errorf("getting offset/size for cid %s in piece %s: %w", c, piece.PieceCid, err) + return nil, fmt.Errorf("getting offset/size for cid %s in piece %s: %w", c, piece.PieceCidV2, err) } // Seek to the section offset readerAt := io.NewSectionReader(reader, int64(offset), int64(piece.BlockSize+MaxCarBlockPrefixSize)) readCid, data, err := util.ReadNode(bufio.NewReader(readerAt)) if err != nil { - return nil, fmt.Errorf("reading data for block %s from reader for piece %s: %w", c, piece.PieceCid, err) + return nil, fmt.Errorf("reading data for block %s from reader for piece %s: %w", c, piece.PieceCidV2, err) } if !bytes.Equal(readCid.Hash(), c.Hash()) { - return nil, fmt.Errorf("read block %s from reader for piece %s, but expected block %s", readCid, piece.PieceCid, c) + return nil, fmt.Errorf("read block %s from reader for piece %s, but expected block %s", readCid, piece.PieceCidV2, c) } return data, nil }() diff --git a/pdp/contract/IPDPProvingSchedule.abi b/pdp/contract/IPDPProvingSchedule.abi index df191dd6d..cab2b5656 100644 --- a/pdp/contract/IPDPProvingSchedule.abi +++ b/pdp/contract/IPDPProvingSchedule.abi @@ -1,59 +1,35 @@ [ { "type": "function", - "name": "challengeWindow", + "name": "getPDPConfig", "inputs": [], "outputs": [ { - "name": "", - "type": "uint256", - "internalType": "uint256" - } - ], - "stateMutability": "pure" - }, - { - "type": "function", - "name": "getChallengesPerProof", - "inputs": [], - "outputs": [ - { - "name": "", + "name": "maxProvingPeriod", "type": "uint64", "internalType": "uint64" - } - ], - "stateMutability": "pure" - }, - { - "type": "function", - "name": "getMaxProvingPeriod", - "inputs": [], - "outputs": [ + }, { - "name": "", - "type": "uint64", - "internalType": "uint64" - } - ], - "stateMutability": "pure" - }, - { - "type": "function", - "name": "initChallengeWindowStart", - "inputs": [], - "outputs": [ + "name": "challengeWindow", + "type": "uint256", + "internalType": "uint256" + }, { - "name": "", + "name": "challengesPerProof", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "initChallengeWindowStart", "type": "uint256", "internalType": "uint256" } ], - "stateMutability": "pure" + "stateMutability": "view" }, { "type": "function", - "name": "nextChallengeWindowStart", + "name": "nextPDPChallengeWindowStart", "inputs": [ { "name": "setId", diff --git a/pdp/contract/IPDPProvingSchedule.json b/pdp/contract/IPDPProvingSchedule.json index b1fbe4526..97afead93 100644 --- a/pdp/contract/IPDPProvingSchedule.json +++ b/pdp/contract/IPDPProvingSchedule.json @@ -1 +1 @@ -{"abi":[{"type":"function","name":"challengeWindow","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"pure"},{"type":"function","name":"getChallengesPerProof","inputs":[],"outputs":[{"name":"","type":"uint64","internalType":"uint64"}],"stateMutability":"pure"},{"type":"function","name":"getMaxProvingPeriod","inputs":[],"outputs":[{"name":"","type":"uint64","internalType":"uint64"}],"stateMutability":"pure"},{"type":"function","name":"initChallengeWindowStart","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"pure"},{"type":"function","name":"nextChallengeWindowStart","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"}],"bytecode":{"object":"0x","sourceMap":"","linkReferences":{}},"deployedBytecode":{"object":"0x","sourceMap":"","linkReferences":{}},"methodIdentifiers":{"challengeWindow()":"861a1412","getChallengesPerProof()":"47d3dfe7","getMaxProvingPeriod()":"f2f12333","initChallengeWindowStart()":"21918cea","nextChallengeWindowStart(uint256)":"8bf96d28"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.23+commit.f704f362\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"name\":\"challengeWindow\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getChallengesPerProof\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMaxProvingPeriod\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initChallengeWindowStart\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"nextChallengeWindowStart\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{\"challengeWindow()\":{\"returns\":{\"_0\":\"Challenge window size in epochs\"}},\"getChallengesPerProof()\":{\"returns\":{\"_0\":\"Number of challenges required per proof\"}},\"getMaxProvingPeriod()\":{\"returns\":{\"_0\":\"Maximum proving period in epochs\"}},\"nextChallengeWindowStart(uint256)\":{\"params\":{\"setId\":\"The ID of the proof set\"},\"returns\":{\"_0\":\"The block number when the next challenge window starts\"}}},\"title\":\"IPDPProvingWindow\",\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{\"challengeWindow()\":{\"notice\":\"Returns the number of epochs at the end of a proving period during which proofs can be submitted\"},\"getChallengesPerProof()\":{\"notice\":\"Returns the required number of challenges/merkle inclusion proofs per proof set\"},\"getMaxProvingPeriod()\":{\"notice\":\"Returns the number of epochs allowed before challenges must be resampled\"},\"initChallengeWindowStart()\":{\"notice\":\"Value for initializing the challenge window start for any proof set assuming proving period starts now\"},\"nextChallengeWindowStart(uint256)\":{\"notice\":\"Calculates the start of the next challenge window for a given proof set\"}},\"notice\":\"Interface for PDP Service SLA specifications\",\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/IPDPProvingSchedule.sol\":\"IPDPProvingSchedule\"},\"evmVersion\":\"shanghai\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[\":@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/\",\":@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/\",\":@pythnetwork/pyth-sdk-solidity/=node_modules/@pythnetwork/pyth-sdk-solidity/\",\":erc4626-tests/=lib/openzeppelin-contracts-upgradeable/lib/erc4626-tests/\",\":forge-std/=lib/forge-std/src/\",\":halmos-cheatcodes/=lib/openzeppelin-contracts-upgradeable/lib/halmos-cheatcodes/src/\",\":openzeppelin-contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/\",\":openzeppelin-contracts/=lib/openzeppelin-contracts/\"]},\"sources\":{\"src/IPDPProvingSchedule.sol\":{\"keccak256\":\"0x6fc7848345c358a7a18e43ad9d93c1ea5fecf9d3f0daca721576d6de96d797b2\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://ab29f0b39894650cf74b6a771e50bc50c91d54f6ba6e5a1b11c7cb1d7878d0cf\",\"dweb:/ipfs/QmawGZjCfua9dbJsqCzN6J9v3kLsE4oRLwMhbbcE4RYUNh\"]}},\"version\":1}","metadata":{"compiler":{"version":"0.8.23+commit.f704f362"},"language":"Solidity","output":{"abi":[{"inputs":[],"stateMutability":"pure","type":"function","name":"challengeWindow","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"pure","type":"function","name":"getChallengesPerProof","outputs":[{"internalType":"uint64","name":"","type":"uint64"}]},{"inputs":[],"stateMutability":"pure","type":"function","name":"getMaxProvingPeriod","outputs":[{"internalType":"uint64","name":"","type":"uint64"}]},{"inputs":[],"stateMutability":"pure","type":"function","name":"initChallengeWindowStart","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"nextChallengeWindowStart","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]}],"devdoc":{"kind":"dev","methods":{"challengeWindow()":{"returns":{"_0":"Challenge window size in epochs"}},"getChallengesPerProof()":{"returns":{"_0":"Number of challenges required per proof"}},"getMaxProvingPeriod()":{"returns":{"_0":"Maximum proving period in epochs"}},"nextChallengeWindowStart(uint256)":{"params":{"setId":"The ID of the proof set"},"returns":{"_0":"The block number when the next challenge window starts"}}},"version":1},"userdoc":{"kind":"user","methods":{"challengeWindow()":{"notice":"Returns the number of epochs at the end of a proving period during which proofs can be submitted"},"getChallengesPerProof()":{"notice":"Returns the required number of challenges/merkle inclusion proofs per proof set"},"getMaxProvingPeriod()":{"notice":"Returns the number of epochs allowed before challenges must be resampled"},"initChallengeWindowStart()":{"notice":"Value for initializing the challenge window start for any proof set assuming proving period starts now"},"nextChallengeWindowStart(uint256)":{"notice":"Calculates the start of the next challenge window for a given proof set"}},"version":1}},"settings":{"remappings":["@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/","@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/","@pythnetwork/pyth-sdk-solidity/=node_modules/@pythnetwork/pyth-sdk-solidity/","erc4626-tests/=lib/openzeppelin-contracts-upgradeable/lib/erc4626-tests/","forge-std/=lib/forge-std/src/","halmos-cheatcodes/=lib/openzeppelin-contracts-upgradeable/lib/halmos-cheatcodes/src/","openzeppelin-contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/","openzeppelin-contracts/=lib/openzeppelin-contracts/"],"optimizer":{"enabled":true,"runs":200},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/IPDPProvingSchedule.sol":"IPDPProvingSchedule"},"evmVersion":"shanghai","libraries":{}},"sources":{"src/IPDPProvingSchedule.sol":{"keccak256":"0x6fc7848345c358a7a18e43ad9d93c1ea5fecf9d3f0daca721576d6de96d797b2","urls":["bzz-raw://ab29f0b39894650cf74b6a771e50bc50c91d54f6ba6e5a1b11c7cb1d7878d0cf","dweb:/ipfs/QmawGZjCfua9dbJsqCzN6J9v3kLsE4oRLwMhbbcE4RYUNh"],"license":"UNLICENSED"}},"version":1},"id":0} \ No newline at end of file +{"abi":[{"type":"function","name":"getPDPConfig","inputs":[],"outputs":[{"name":"maxProvingPeriod","type":"uint64","internalType":"uint64"},{"name":"challengeWindow","type":"uint256","internalType":"uint256"},{"name":"challengesPerProof","type":"uint256","internalType":"uint256"},{"name":"initChallengeWindowStart","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"nextPDPChallengeWindowStart","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"}],"bytecode":{"object":"0x","sourceMap":"","linkReferences":{}},"deployedBytecode":{"object":"0x","sourceMap":"","linkReferences":{}},"methodIdentifiers":{"getPDPConfig()":"ea0f9354","nextPDPChallengeWindowStart(uint256)":"11d41294"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.23+commit.f704f362\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"name\":\"getPDPConfig\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"maxProvingPeriod\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"challengeWindow\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"challengesPerProof\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"initChallengeWindowStart\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"nextPDPChallengeWindowStart\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{\"getPDPConfig()\":{\"returns\":{\"challengeWindow\":\"Number of epochs for the challenge window\",\"challengesPerProof\":\"Number of challenges required per proof\",\"initChallengeWindowStart\":\"Initial challenge window start for new data sets assuming proving period starts now\",\"maxProvingPeriod\":\"Maximum number of epochs between proofs\"}},\"nextPDPChallengeWindowStart(uint256)\":{\"params\":{\"setId\":\"The ID of the data set\"},\"returns\":{\"_0\":\"The block number when the next challenge window starts\"}}},\"title\":\"IPDPProvingSchedule\",\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{\"getPDPConfig()\":{\"notice\":\"Returns PDP configuration values\"},\"nextPDPChallengeWindowStart(uint256)\":{\"notice\":\"Returns the start of the next challenge window for a data set\"}},\"notice\":\"Interface for PDP Service SLA specifications\",\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/IPDPProvingSchedule.sol\":\"IPDPProvingSchedule\"},\"evmVersion\":\"shanghai\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":1000000000},\"remappings\":[\":@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/\",\":@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/\",\":@pythnetwork/pyth-sdk-solidity/=lib/pyth-sdk-solidity/\",\":erc4626-tests/=lib/openzeppelin-contracts-upgradeable/lib/erc4626-tests/\",\":forge-std/=lib/forge-std/src/\",\":halmos-cheatcodes/=lib/openzeppelin-contracts-upgradeable/lib/halmos-cheatcodes/src/\",\":openzeppelin-contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/\",\":openzeppelin-contracts/=lib/openzeppelin-contracts/\",\":pyth-sdk-solidity/=lib/pyth-sdk-solidity/\"],\"viaIR\":true},\"sources\":{\"src/IPDPProvingSchedule.sol\":{\"keccak256\":\"0x18f592eda642914eab092c28ae9527e20571a2a7191c82f475a432660c6a5417\",\"license\":\"Apache-2.0 OR MIT\",\"urls\":[\"bzz-raw://142048503986dbb34905b03c99fed970d50dc0d088f2dc4274cc9e8c343ce83f\",\"dweb:/ipfs/QmbWYFT3ZuoefmSHmTrR64dinzxACfLKh9u3zHqRd1jETS\"]}},\"version\":1}","metadata":{"compiler":{"version":"0.8.23+commit.f704f362"},"language":"Solidity","output":{"abi":[{"inputs":[],"stateMutability":"view","type":"function","name":"getPDPConfig","outputs":[{"internalType":"uint64","name":"maxProvingPeriod","type":"uint64"},{"internalType":"uint256","name":"challengeWindow","type":"uint256"},{"internalType":"uint256","name":"challengesPerProof","type":"uint256"},{"internalType":"uint256","name":"initChallengeWindowStart","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"nextPDPChallengeWindowStart","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]}],"devdoc":{"kind":"dev","methods":{"getPDPConfig()":{"returns":{"challengeWindow":"Number of epochs for the challenge window","challengesPerProof":"Number of challenges required per proof","initChallengeWindowStart":"Initial challenge window start for new data sets assuming proving period starts now","maxProvingPeriod":"Maximum number of epochs between proofs"}},"nextPDPChallengeWindowStart(uint256)":{"params":{"setId":"The ID of the data set"},"returns":{"_0":"The block number when the next challenge window starts"}}},"version":1},"userdoc":{"kind":"user","methods":{"getPDPConfig()":{"notice":"Returns PDP configuration values"},"nextPDPChallengeWindowStart(uint256)":{"notice":"Returns the start of the next challenge window for a data set"}},"version":1}},"settings":{"remappings":["@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/","@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/","@pythnetwork/pyth-sdk-solidity/=lib/pyth-sdk-solidity/","erc4626-tests/=lib/openzeppelin-contracts-upgradeable/lib/erc4626-tests/","forge-std/=lib/forge-std/src/","halmos-cheatcodes/=lib/openzeppelin-contracts-upgradeable/lib/halmos-cheatcodes/src/","openzeppelin-contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/","openzeppelin-contracts/=lib/openzeppelin-contracts/","pyth-sdk-solidity/=lib/pyth-sdk-solidity/"],"optimizer":{"enabled":true,"runs":1000000000},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/IPDPProvingSchedule.sol":"IPDPProvingSchedule"},"evmVersion":"shanghai","libraries":{},"viaIR":true},"sources":{"src/IPDPProvingSchedule.sol":{"keccak256":"0x18f592eda642914eab092c28ae9527e20571a2a7191c82f475a432660c6a5417","urls":["bzz-raw://142048503986dbb34905b03c99fed970d50dc0d088f2dc4274cc9e8c343ce83f","dweb:/ipfs/QmbWYFT3ZuoefmSHmTrR64dinzxACfLKh9u3zHqRd1jETS"],"license":"Apache-2.0 OR MIT"}},"version":1},"id":48} \ No newline at end of file diff --git a/pdp/contract/ListenerServiceWithViewContract.abi b/pdp/contract/ListenerServiceWithViewContract.abi new file mode 100644 index 000000000..5040222da --- /dev/null +++ b/pdp/contract/ListenerServiceWithViewContract.abi @@ -0,0 +1,15 @@ +[ + { + "type": "function", + "name": "viewContractAddress", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "view" + } +] \ No newline at end of file diff --git a/pdp/contract/ListenerServiceWithViewContract.go b/pdp/contract/ListenerServiceWithViewContract.go new file mode 100644 index 000000000..44ec65320 --- /dev/null +++ b/pdp/contract/ListenerServiceWithViewContract.go @@ -0,0 +1,212 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package contract + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// ListenerServiceWithViewContractMetaData contains all meta data concerning the ListenerServiceWithViewContract contract. +var ListenerServiceWithViewContractMetaData = &bind.MetaData{ + ABI: "[{\"type\":\"function\",\"name\":\"viewContractAddress\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"}]", +} + +// ListenerServiceWithViewContractABI is the input ABI used to generate the binding from. +// Deprecated: Use ListenerServiceWithViewContractMetaData.ABI instead. +var ListenerServiceWithViewContractABI = ListenerServiceWithViewContractMetaData.ABI + +// ListenerServiceWithViewContract is an auto generated Go binding around an Ethereum contract. +type ListenerServiceWithViewContract struct { + ListenerServiceWithViewContractCaller // Read-only binding to the contract + ListenerServiceWithViewContractTransactor // Write-only binding to the contract + ListenerServiceWithViewContractFilterer // Log filterer for contract events +} + +// ListenerServiceWithViewContractCaller is an auto generated read-only Go binding around an Ethereum contract. +type ListenerServiceWithViewContractCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ListenerServiceWithViewContractTransactor is an auto generated write-only Go binding around an Ethereum contract. +type ListenerServiceWithViewContractTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ListenerServiceWithViewContractFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type ListenerServiceWithViewContractFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ListenerServiceWithViewContractSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type ListenerServiceWithViewContractSession struct { + Contract *ListenerServiceWithViewContract // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ListenerServiceWithViewContractCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type ListenerServiceWithViewContractCallerSession struct { + Contract *ListenerServiceWithViewContractCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// ListenerServiceWithViewContractTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type ListenerServiceWithViewContractTransactorSession struct { + Contract *ListenerServiceWithViewContractTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ListenerServiceWithViewContractRaw is an auto generated low-level Go binding around an Ethereum contract. +type ListenerServiceWithViewContractRaw struct { + Contract *ListenerServiceWithViewContract // Generic contract binding to access the raw methods on +} + +// ListenerServiceWithViewContractCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type ListenerServiceWithViewContractCallerRaw struct { + Contract *ListenerServiceWithViewContractCaller // Generic read-only contract binding to access the raw methods on +} + +// ListenerServiceWithViewContractTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type ListenerServiceWithViewContractTransactorRaw struct { + Contract *ListenerServiceWithViewContractTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewListenerServiceWithViewContract creates a new instance of ListenerServiceWithViewContract, bound to a specific deployed contract. +func NewListenerServiceWithViewContract(address common.Address, backend bind.ContractBackend) (*ListenerServiceWithViewContract, error) { + contract, err := bindListenerServiceWithViewContract(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &ListenerServiceWithViewContract{ListenerServiceWithViewContractCaller: ListenerServiceWithViewContractCaller{contract: contract}, ListenerServiceWithViewContractTransactor: ListenerServiceWithViewContractTransactor{contract: contract}, ListenerServiceWithViewContractFilterer: ListenerServiceWithViewContractFilterer{contract: contract}}, nil +} + +// NewListenerServiceWithViewContractCaller creates a new read-only instance of ListenerServiceWithViewContract, bound to a specific deployed contract. +func NewListenerServiceWithViewContractCaller(address common.Address, caller bind.ContractCaller) (*ListenerServiceWithViewContractCaller, error) { + contract, err := bindListenerServiceWithViewContract(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ListenerServiceWithViewContractCaller{contract: contract}, nil +} + +// NewListenerServiceWithViewContractTransactor creates a new write-only instance of ListenerServiceWithViewContract, bound to a specific deployed contract. +func NewListenerServiceWithViewContractTransactor(address common.Address, transactor bind.ContractTransactor) (*ListenerServiceWithViewContractTransactor, error) { + contract, err := bindListenerServiceWithViewContract(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ListenerServiceWithViewContractTransactor{contract: contract}, nil +} + +// NewListenerServiceWithViewContractFilterer creates a new log filterer instance of ListenerServiceWithViewContract, bound to a specific deployed contract. +func NewListenerServiceWithViewContractFilterer(address common.Address, filterer bind.ContractFilterer) (*ListenerServiceWithViewContractFilterer, error) { + contract, err := bindListenerServiceWithViewContract(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ListenerServiceWithViewContractFilterer{contract: contract}, nil +} + +// bindListenerServiceWithViewContract binds a generic wrapper to an already deployed contract. +func bindListenerServiceWithViewContract(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ListenerServiceWithViewContractMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_ListenerServiceWithViewContract *ListenerServiceWithViewContractRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ListenerServiceWithViewContract.Contract.ListenerServiceWithViewContractCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_ListenerServiceWithViewContract *ListenerServiceWithViewContractRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ListenerServiceWithViewContract.Contract.ListenerServiceWithViewContractTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_ListenerServiceWithViewContract *ListenerServiceWithViewContractRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ListenerServiceWithViewContract.Contract.ListenerServiceWithViewContractTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_ListenerServiceWithViewContract *ListenerServiceWithViewContractCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ListenerServiceWithViewContract.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_ListenerServiceWithViewContract *ListenerServiceWithViewContractTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ListenerServiceWithViewContract.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_ListenerServiceWithViewContract *ListenerServiceWithViewContractTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ListenerServiceWithViewContract.Contract.contract.Transact(opts, method, params...) +} + +// ViewContractAddress is a free data retrieval call binding the contract method 0x7a9ebc15. +// +// Solidity: function viewContractAddress() view returns(address) +func (_ListenerServiceWithViewContract *ListenerServiceWithViewContractCaller) ViewContractAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _ListenerServiceWithViewContract.contract.Call(opts, &out, "viewContractAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// ViewContractAddress is a free data retrieval call binding the contract method 0x7a9ebc15. +// +// Solidity: function viewContractAddress() view returns(address) +func (_ListenerServiceWithViewContract *ListenerServiceWithViewContractSession) ViewContractAddress() (common.Address, error) { + return _ListenerServiceWithViewContract.Contract.ViewContractAddress(&_ListenerServiceWithViewContract.CallOpts) +} + +// ViewContractAddress is a free data retrieval call binding the contract method 0x7a9ebc15. +// +// Solidity: function viewContractAddress() view returns(address) +func (_ListenerServiceWithViewContract *ListenerServiceWithViewContractCallerSession) ViewContractAddress() (common.Address, error) { + return _ListenerServiceWithViewContract.Contract.ViewContractAddress(&_ListenerServiceWithViewContract.CallOpts) +} diff --git a/pdp/contract/PDPVerifier.abi b/pdp/contract/PDPVerifier.abi index 93aabc8db..069b9a0bb 100644 --- a/pdp/contract/PDPVerifier.abi +++ b/pdp/contract/PDPVerifier.abi @@ -71,7 +71,7 @@ }, { "type": "function", - "name": "MAX_ROOT_SIZE", + "name": "MAX_PIECE_SIZE_LOG2", "inputs": [], "outputs": [ { @@ -162,7 +162,20 @@ }, { "type": "function", - "name": "addRoots", + "name": "VERSION", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "string", + "internalType": "string" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "addPieces", "inputs": [ { "name": "setId", @@ -170,26 +183,14 @@ "internalType": "uint256" }, { - "name": "rootData", + "name": "pieceData", "type": "tuple[]", - "internalType": "struct PDPVerifier.RootData[]", + "internalType": "struct Cids.Cid[]", "components": [ { - "name": "root", - "type": "tuple", - "internalType": "struct Cids.Cid", - "components": [ - { - "name": "data", - "type": "bytes", - "internalType": "bytes" - } - ] - }, - { - "name": "rawSize", - "type": "uint256", - "internalType": "uint256" + "name": "data", + "type": "bytes", + "internalType": "bytes" } ] }, @@ -234,12 +235,17 @@ }, { "type": "function", - "name": "claimProofSetOwnership", + "name": "claimDataSetStorageProvider", "inputs": [ { "name": "setId", "type": "uint256", "internalType": "uint256" + }, + { + "name": "extraData", + "type": "bytes", + "internalType": "bytes" } ], "outputs": [], @@ -247,7 +253,7 @@ }, { "type": "function", - "name": "createProofSet", + "name": "createDataSet", "inputs": [ { "name": "listenerAddr", @@ -271,7 +277,26 @@ }, { "type": "function", - "name": "deleteProofSet", + "name": "dataSetLive", + "inputs": [ + { + "name": "setId", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "deleteDataSet", "inputs": [ { "name": "setId", @@ -289,7 +314,7 @@ }, { "type": "function", - "name": "findRootIds", + "name": "findPieceIds", "inputs": [ { "name": "setId", @@ -306,10 +331,10 @@ { "name": "", "type": "tuple[]", - "internalType": "struct PDPVerifier.RootIdAndOffset[]", + "internalType": "struct IPDPTypes.PieceIdAndOffset[]", "components": [ { - "name": "rootId", + "name": "pieceId", "type": "uint256", "internalType": "uint256" }, @@ -325,11 +350,17 @@ }, { "type": "function", - "name": "getChallengeFinality", - "inputs": [], + "name": "getActivePieceCount", + "inputs": [ + { + "name": "setId", + "type": "uint256", + "internalType": "uint256" + } + ], "outputs": [ { - "name": "", + "name": "activeCount", "type": "uint256", "internalType": "uint256" } @@ -338,44 +369,71 @@ }, { "type": "function", - "name": "getChallengeRange", + "name": "getActivePieces", "inputs": [ { "name": "setId", "type": "uint256", "internalType": "uint256" + }, + { + "name": "offset", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "limit", + "type": "uint256", + "internalType": "uint256" } ], "outputs": [ { - "name": "", - "type": "uint256", - "internalType": "uint256" + "name": "pieces", + "type": "tuple[]", + "internalType": "struct Cids.Cid[]", + "components": [ + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + }, + { + "name": "pieceIds", + "type": "uint256[]", + "internalType": "uint256[]" + }, + { + "name": "rawSizes", + "type": "uint256[]", + "internalType": "uint256[]" + }, + { + "name": "hasMore", + "type": "bool", + "internalType": "bool" } ], "stateMutability": "view" }, { "type": "function", - "name": "getFILUSDPrice", + "name": "getChallengeFinality", "inputs": [], "outputs": [ { "name": "", - "type": "uint64", - "internalType": "uint64" - }, - { - "name": "", - "type": "int32", - "internalType": "int32" + "type": "uint256", + "internalType": "uint256" } ], "stateMutability": "view" }, { "type": "function", - "name": "getNextChallengeEpoch", + "name": "getChallengeRange", "inputs": [ { "name": "setId", @@ -394,20 +452,7 @@ }, { "type": "function", - "name": "getNextProofSetId", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "uint64", - "internalType": "uint64" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "getNextRootId", + "name": "getDataSetLastProvenEpoch", "inputs": [ { "name": "setId", @@ -426,7 +471,7 @@ }, { "type": "function", - "name": "getProofSetLastProvenEpoch", + "name": "getDataSetLeafCount", "inputs": [ { "name": "setId", @@ -445,7 +490,7 @@ }, { "type": "function", - "name": "getProofSetLeafCount", + "name": "getDataSetListener", "inputs": [ { "name": "setId", @@ -456,15 +501,15 @@ "outputs": [ { "name": "", - "type": "uint256", - "internalType": "uint256" + "type": "address", + "internalType": "address" } ], "stateMutability": "view" }, { "type": "function", - "name": "getProofSetListener", + "name": "getDataSetStorageProvider", "inputs": [ { "name": "setId", @@ -477,13 +522,36 @@ "name": "", "type": "address", "internalType": "address" + }, + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getFILUSDPrice", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "", + "type": "int32", + "internalType": "int32" } ], "stateMutability": "view" }, { "type": "function", - "name": "getProofSetOwner", + "name": "getNextChallengeEpoch", "inputs": [ { "name": "setId", @@ -494,23 +562,31 @@ "outputs": [ { "name": "", - "type": "address", - "internalType": "address" - }, + "type": "uint256", + "internalType": "uint256" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getNextDataSetId", + "inputs": [], + "outputs": [ { "name": "", - "type": "address", - "internalType": "address" + "type": "uint64", + "internalType": "uint64" } ], "stateMutability": "view" }, { "type": "function", - "name": "getRandomness", + "name": "getNextPieceId", "inputs": [ { - "name": "epoch", + "name": "setId", "type": "uint256", "internalType": "uint256" } @@ -526,7 +602,7 @@ }, { "type": "function", - "name": "getRootCid", + "name": "getPieceCid", "inputs": [ { "name": "setId", @@ -534,7 +610,7 @@ "internalType": "uint256" }, { - "name": "rootId", + "name": "pieceId", "type": "uint256", "internalType": "uint256" } @@ -557,7 +633,7 @@ }, { "type": "function", - "name": "getRootLeafCount", + "name": "getPieceLeafCount", "inputs": [ { "name": "setId", @@ -565,7 +641,26 @@ "internalType": "uint256" }, { - "name": "rootId", + "name": "pieceId", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [ + { + "name": "", + "type": "uint256", + "internalType": "uint256" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getRandomness", + "inputs": [ + { + "name": "epoch", "type": "uint256", "internalType": "uint256" } @@ -611,6 +706,13 @@ "outputs": [], "stateMutability": "nonpayable" }, + { + "type": "function", + "name": "migrate", + "inputs": [], + "outputs": [], + "stateMutability": "nonpayable" + }, { "type": "function", "name": "nextProvingPeriod", @@ -649,12 +751,17 @@ }, { "type": "function", - "name": "proofSetLive", + "name": "pieceChallengable", "inputs": [ { "name": "setId", "type": "uint256", "internalType": "uint256" + }, + { + "name": "pieceId", + "type": "uint256", + "internalType": "uint256" } ], "outputs": [ @@ -668,7 +775,7 @@ }, { "type": "function", - "name": "proposeProofSetOwner", + "name": "pieceLive", "inputs": [ { "name": "setId", @@ -676,7 +783,31 @@ "internalType": "uint256" }, { - "name": "newOwner", + "name": "pieceId", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "proposeDataSetStorageProvider", + "inputs": [ + { + "name": "setId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "newStorageProvider", "type": "address", "internalType": "address" } @@ -696,7 +827,7 @@ { "name": "proofs", "type": "tuple[]", - "internalType": "struct PDPVerifier.Proof[]", + "internalType": "struct IPDPTypes.Proof[]", "components": [ { "name": "leaf", @@ -736,55 +867,7 @@ }, { "type": "function", - "name": "rootChallengable", - "inputs": [ - { - "name": "setId", - "type": "uint256", - "internalType": "uint256" - }, - { - "name": "rootId", - "type": "uint256", - "internalType": "uint256" - } - ], - "outputs": [ - { - "name": "", - "type": "bool", - "internalType": "bool" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "rootLive", - "inputs": [ - { - "name": "setId", - "type": "uint256", - "internalType": "uint256" - }, - { - "name": "rootId", - "type": "uint256", - "internalType": "uint256" - } - ], - "outputs": [ - { - "name": "", - "type": "bool", - "internalType": "bool" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "scheduleRemovals", + "name": "schedulePieceDeletions", "inputs": [ { "name": "setId", @@ -792,7 +875,7 @@ "internalType": "uint256" }, { - "name": "rootIds", + "name": "pieceIds", "type": "uint256[]", "internalType": "uint256[]" }, @@ -838,16 +921,54 @@ }, { "type": "event", - "name": "Debug", + "name": "ContractUpgraded", "inputs": [ { - "name": "message", + "name": "version", "type": "string", "indexed": false, "internalType": "string" }, { - "name": "value", + "name": "implementation", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "DataSetCreated", + "inputs": [ + { + "name": "setId", + "type": "uint256", + "indexed": true, + "internalType": "uint256" + }, + { + "name": "storageProvider", + "type": "address", + "indexed": true, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "DataSetDeleted", + "inputs": [ + { + "name": "setId", + "type": "uint256", + "indexed": true, + "internalType": "uint256" + }, + { + "name": "deletedLeafCount", "type": "uint256", "indexed": false, "internalType": "uint256" @@ -855,6 +976,19 @@ ], "anonymous": false }, + { + "type": "event", + "name": "DataSetEmpty", + "inputs": [ + { + "name": "setId", + "type": "uint256", + "indexed": true, + "internalType": "uint256" + } + ], + "anonymous": false + }, { "type": "event", "name": "Initialized", @@ -914,7 +1048,7 @@ }, { "type": "event", - "name": "PossessionProven", + "name": "PiecesAdded", "inputs": [ { "name": "setId", @@ -923,20 +1057,21 @@ "internalType": "uint256" }, { - "name": "challenges", + "name": "pieceIds", + "type": "uint256[]", + "indexed": false, + "internalType": "uint256[]" + }, + { + "name": "pieceCids", "type": "tuple[]", "indexed": false, - "internalType": "struct PDPVerifier.RootIdAndOffset[]", + "internalType": "struct Cids.Cid[]", "components": [ { - "name": "rootId", - "type": "uint256", - "internalType": "uint256" - }, - { - "name": "offset", - "type": "uint256", - "internalType": "uint256" + "name": "data", + "type": "bytes", + "internalType": "bytes" } ] } @@ -945,7 +1080,7 @@ }, { "type": "event", - "name": "ProofFeePaid", + "name": "PiecesRemoved", "inputs": [ { "name": "setId", @@ -954,29 +1089,17 @@ "internalType": "uint256" }, { - "name": "fee", - "type": "uint256", - "indexed": false, - "internalType": "uint256" - }, - { - "name": "price", - "type": "uint64", - "indexed": false, - "internalType": "uint64" - }, - { - "name": "expo", - "type": "int32", + "name": "pieceIds", + "type": "uint256[]", "indexed": false, - "internalType": "int32" + "internalType": "uint256[]" } ], "anonymous": false }, { "type": "event", - "name": "ProofSetCreated", + "name": "PossessionProven", "inputs": [ { "name": "setId", @@ -985,17 +1108,29 @@ "internalType": "uint256" }, { - "name": "owner", - "type": "address", - "indexed": true, - "internalType": "address" + "name": "challenges", + "type": "tuple[]", + "indexed": false, + "internalType": "struct IPDPTypes.PieceIdAndOffset[]", + "components": [ + { + "name": "pieceId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "offset", + "type": "uint256", + "internalType": "uint256" + } + ] } ], "anonymous": false }, { "type": "event", - "name": "ProofSetDeleted", + "name": "ProofFeePaid", "inputs": [ { "name": "setId", @@ -1004,30 +1139,29 @@ "internalType": "uint256" }, { - "name": "deletedLeafCount", + "name": "fee", "type": "uint256", "indexed": false, "internalType": "uint256" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "ProofSetEmpty", - "inputs": [ + }, { - "name": "setId", - "type": "uint256", - "indexed": true, - "internalType": "uint256" + "name": "price", + "type": "uint64", + "indexed": false, + "internalType": "uint64" + }, + { + "name": "expo", + "type": "int32", + "indexed": false, + "internalType": "int32" } ], "anonymous": false }, { "type": "event", - "name": "ProofSetOwnerChanged", + "name": "StorageProviderChanged", "inputs": [ { "name": "setId", @@ -1036,13 +1170,13 @@ "internalType": "uint256" }, { - "name": "oldOwner", + "name": "oldStorageProvider", "type": "address", "indexed": true, "internalType": "address" }, { - "name": "newOwner", + "name": "newStorageProvider", "type": "address", "indexed": true, "internalType": "address" @@ -1050,44 +1184,6 @@ ], "anonymous": false }, - { - "type": "event", - "name": "RootsAdded", - "inputs": [ - { - "name": "setId", - "type": "uint256", - "indexed": true, - "internalType": "uint256" - }, - { - "name": "rootIds", - "type": "uint256[]", - "indexed": false, - "internalType": "uint256[]" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "RootsRemoved", - "inputs": [ - { - "name": "setId", - "type": "uint256", - "indexed": true, - "internalType": "uint256" - }, - { - "name": "rootIds", - "type": "uint256[]", - "indexed": false, - "internalType": "uint256[]" - } - ], - "anonymous": false - }, { "type": "event", "name": "Upgraded", diff --git a/pdp/contract/PDPVerifier.json b/pdp/contract/PDPVerifier.json index 4288a174e..049cf67fa 100644 --- a/pdp/contract/PDPVerifier.json +++ b/pdp/contract/PDPVerifier.json @@ -1 +1 @@ -{"abi":[{"type":"constructor","inputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"BURN_ACTOR","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"EXTRA_DATA_MAX_SIZE","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"FIL_USD_PRICE_FEED_ID","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"LEAF_SIZE","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"MAX_ENQUEUED_REMOVALS","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"MAX_ROOT_SIZE","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"NO_CHALLENGE_SCHEDULED","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"NO_PROVEN_EPOCH","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"PYTH","inputs":[],"outputs":[{"name":"","type":"address","internalType":"contract IPyth"}],"stateMutability":"view"},{"type":"function","name":"RANDOMNESS_PRECOMPILE","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"SECONDS_IN_DAY","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"UPGRADE_INTERFACE_VERSION","inputs":[],"outputs":[{"name":"","type":"string","internalType":"string"}],"stateMutability":"view"},{"type":"function","name":"addRoots","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"rootData","type":"tuple[]","internalType":"struct PDPVerifier.RootData[]","components":[{"name":"root","type":"tuple","internalType":"struct Cids.Cid","components":[{"name":"data","type":"bytes","internalType":"bytes"}]},{"name":"rawSize","type":"uint256","internalType":"uint256"}]},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"nonpayable"},{"type":"function","name":"calculateProofFee","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"estimatedGasFee","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"claimProofSetOwnership","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"createProofSet","inputs":[{"name":"listenerAddr","type":"address","internalType":"address"},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"payable"},{"type":"function","name":"deleteProofSet","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"findRootIds","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"leafIndexs","type":"uint256[]","internalType":"uint256[]"}],"outputs":[{"name":"","type":"tuple[]","internalType":"struct PDPVerifier.RootIdAndOffset[]","components":[{"name":"rootId","type":"uint256","internalType":"uint256"},{"name":"offset","type":"uint256","internalType":"uint256"}]}],"stateMutability":"view"},{"type":"function","name":"getChallengeFinality","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getChallengeRange","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getFILUSDPrice","inputs":[],"outputs":[{"name":"","type":"uint64","internalType":"uint64"},{"name":"","type":"int32","internalType":"int32"}],"stateMutability":"view"},{"type":"function","name":"getNextChallengeEpoch","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getNextProofSetId","inputs":[],"outputs":[{"name":"","type":"uint64","internalType":"uint64"}],"stateMutability":"view"},{"type":"function","name":"getNextRootId","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getProofSetLastProvenEpoch","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getProofSetLeafCount","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getProofSetListener","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"getProofSetOwner","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"address","internalType":"address"},{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"getRandomness","inputs":[{"name":"epoch","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getRootCid","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"rootId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"tuple","internalType":"struct Cids.Cid","components":[{"name":"data","type":"bytes","internalType":"bytes"}]}],"stateMutability":"view"},{"type":"function","name":"getRootLeafCount","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"rootId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getScheduledRemovals","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256[]","internalType":"uint256[]"}],"stateMutability":"view"},{"type":"function","name":"initialize","inputs":[{"name":"_challengeFinality","type":"uint256","internalType":"uint256"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"nextProvingPeriod","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"challengeEpoch","type":"uint256","internalType":"uint256"},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"owner","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"proofSetLive","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"proposeProofSetOwner","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"newOwner","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"provePossession","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"proofs","type":"tuple[]","internalType":"struct PDPVerifier.Proof[]","components":[{"name":"leaf","type":"bytes32","internalType":"bytes32"},{"name":"proof","type":"bytes32[]","internalType":"bytes32[]"}]}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"proxiableUUID","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"renounceOwnership","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"rootChallengable","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"rootId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"rootLive","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"rootId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"scheduleRemovals","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"rootIds","type":"uint256[]","internalType":"uint256[]"},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"transferOwnership","inputs":[{"name":"newOwner","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"upgradeToAndCall","inputs":[{"name":"newImplementation","type":"address","internalType":"address"},{"name":"data","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"payable"},{"type":"event","name":"Debug","inputs":[{"name":"message","type":"string","indexed":false,"internalType":"string"},{"name":"value","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"Initialized","inputs":[{"name":"version","type":"uint64","indexed":false,"internalType":"uint64"}],"anonymous":false},{"type":"event","name":"NextProvingPeriod","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"challengeEpoch","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"leafCount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"OwnershipTransferred","inputs":[{"name":"previousOwner","type":"address","indexed":true,"internalType":"address"},{"name":"newOwner","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"PossessionProven","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"challenges","type":"tuple[]","indexed":false,"internalType":"struct PDPVerifier.RootIdAndOffset[]","components":[{"name":"rootId","type":"uint256","internalType":"uint256"},{"name":"offset","type":"uint256","internalType":"uint256"}]}],"anonymous":false},{"type":"event","name":"ProofFeePaid","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"fee","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"price","type":"uint64","indexed":false,"internalType":"uint64"},{"name":"expo","type":"int32","indexed":false,"internalType":"int32"}],"anonymous":false},{"type":"event","name":"ProofSetCreated","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"owner","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"ProofSetDeleted","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"deletedLeafCount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"ProofSetEmpty","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"ProofSetOwnerChanged","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"oldOwner","type":"address","indexed":true,"internalType":"address"},{"name":"newOwner","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"RootsAdded","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"rootIds","type":"uint256[]","indexed":false,"internalType":"uint256[]"}],"anonymous":false},{"type":"event","name":"RootsRemoved","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"rootIds","type":"uint256[]","indexed":false,"internalType":"uint256[]"}],"anonymous":false},{"type":"event","name":"Upgraded","inputs":[{"name":"implementation","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"error","name":"AddressEmptyCode","inputs":[{"name":"target","type":"address","internalType":"address"}]},{"type":"error","name":"ERC1967InvalidImplementation","inputs":[{"name":"implementation","type":"address","internalType":"address"}]},{"type":"error","name":"ERC1967NonPayable","inputs":[]},{"type":"error","name":"FailedCall","inputs":[]},{"type":"error","name":"IndexedError","inputs":[{"name":"idx","type":"uint256","internalType":"uint256"},{"name":"msg","type":"string","internalType":"string"}]},{"type":"error","name":"InvalidInitialization","inputs":[]},{"type":"error","name":"NotInitializing","inputs":[]},{"type":"error","name":"OwnableInvalidOwner","inputs":[{"name":"owner","type":"address","internalType":"address"}]},{"type":"error","name":"OwnableUnauthorizedAccount","inputs":[{"name":"account","type":"address","internalType":"address"}]},{"type":"error","name":"UUPSUnauthorizedCallContext","inputs":[]},{"type":"error","name":"UUPSUnsupportedProxiableUUID","inputs":[{"name":"slot","type":"bytes32","internalType":"bytes32"}]}],"bytecode":{"object":"0x60a06040523073ffffffffffffffffffffffffffffffffffffffff1660809073ffffffffffffffffffffffffffffffffffffffff1681525034801562000043575f80fd5b50620000546200005a60201b60201c565b620001c4565b5f6200006b6200015e60201b60201c565b9050805f0160089054906101000a900460ff1615620000b6576040517ff92ee8a900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff8016815f015f9054906101000a900467ffffffffffffffff1667ffffffffffffffff16146200015b5767ffffffffffffffff815f015f6101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055507fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d267ffffffffffffffff604051620001529190620001a9565b60405180910390a15b50565b5f7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00905090565b5f67ffffffffffffffff82169050919050565b620001a38162000185565b82525050565b5f602082019050620001be5f83018462000198565b92915050565b608051617d10620001eb5f395f8181613de901528181613e3e0152613ff80152617d105ff3fe608060405260043610610271575f3560e01c806367e406d51161014e5780639f8cb3bd116100c0578063f2fde38b11610079578063f2fde38b146109c5578063f58f952b146109ed578063f5cac1ba14610a09578063f83758fe14610a45578063faa6716314610a6f578063fe4b84df14610aab57610271565b80639f8cb3bd146108b9578063ad3cb1cc146108e3578063c0e159491461090d578063d49245c114610937578063ee3dac6514610973578063f178b1be1461099b57610271565b806371cf2a161161011257806371cf2a1614610789578063847d1d06146107c557806389208ba9146107ed5780638da5cb5b146108295780638ea417e5146108535780639153e64b1461087d57610271565b806367e406d5146106a95780636ba4608f146106d35780636cb55c161461070f5780636fa4469214610737578063715018a61461077357610271565b80633f84135f116101e757806347331050116101ab57806347331050146105965780634903704a146105d25780634f1ef2861461060e5780634fa279201461062a57806352d1902d1461065557806361a52a361461067f57610271565b80633f84135f1461048f578063453f4f62146104cb57806345c0b92d14610507578063462dd4491461052f5780634726075b1461055957610271565b806315b175701161023957806315b175701461037157806316e2bcd51461039b57806319c75950146103c557806331601226146103ef5780633b68e4e91461042b5780633b7ae9131461045357610271565b8063029b4646146102755780630528a55b1461029f5780630a4d7932146102db5780630a6a63f11461030b57806311c0ee4a14610335575b5f80fd5b348015610280575f80fd5b50610289610ad3565b6040516102969190614f24565b60405180910390f35b3480156102aa575f80fd5b506102c560048036038101906102c09190614fd9565b610ad9565b6040516102d2919061511a565b60405180910390f35b6102f560048036038101906102f091906151e9565b610bc8565b6040516103029190614f24565b60405180910390f35b348015610316575f80fd5b5061031f610ee8565b60405161032c9190615255565b60405180910390f35b348015610340575f80fd5b5061035b600480360381019061035691906152c3565b610f00565b6040516103689190614f24565b60405180910390f35b34801561037c575f80fd5b506103856112a4565b6040516103929190615255565b60405180910390f35b3480156103a6575f80fd5b506103af6112bc565b6040516103bc9190614f24565b60405180910390f35b3480156103d0575f80fd5b506103d96112c7565b6040516103e6919061536c565b60405180910390f35b3480156103fa575f80fd5b5061041560048036038101906104109190615385565b6112ed565b6040516104229190615255565b60405180910390f35b348015610436575f80fd5b50610451600480360381019061044c91906153b0565b61136e565b005b34801561045e575f80fd5b5061047960048036038101906104749190615441565b6116b6565b6040516104869190615530565b60405180910390f35b34801561049a575f80fd5b506104b560048036038101906104b09190615385565b6117c8565b6040516104c29190614f24565b60405180910390f35b3480156104d6575f80fd5b506104f160048036038101906104ec9190615385565b61182a565b6040516104fe9190614f24565b60405180910390f35b348015610512575f80fd5b5061052d60048036038101906105289190615550565b611924565b005b34801561053a575f80fd5b50610543611e04565b6040516105509190614f24565b60405180910390f35b348015610564575f80fd5b5061057f600480360381019061057a9190615385565b611e08565b60405161058d9291906155c1565b60405180910390f35b3480156105a1575f80fd5b506105bc60048036038101906105b79190615441565b611ebd565b6040516105c99190615602565b60405180910390f35b3480156105dd575f80fd5b506105f860048036038101906105f39190615441565b611f16565b6040516106059190614f24565b60405180910390f35b61062860048036038101906106239190615743565b611f7a565b005b348015610635575f80fd5b5061063e611f99565b60405161064c9291906157da565b60405180910390f35b348015610660575f80fd5b506106696120ab565b604051610676919061536c565b60405180910390f35b34801561068a575f80fd5b506106936120dc565b6040516106a09190614f24565b60405180910390f35b3480156106b4575f80fd5b506106bd6120e3565b6040516106ca919061585c565b60405180910390f35b3480156106de575f80fd5b506106f960048036038101906106f49190615385565b6120fb565b6040516107069190614f24565b60405180910390f35b34801561071a575f80fd5b5061073560048036038101906107309190615875565b61215d565b005b348015610742575f80fd5b5061075d60048036038101906107589190615385565b612307565b60405161076a919061595b565b60405180910390f35b34801561077e575f80fd5b50610787612416565b005b348015610794575f80fd5b506107af60048036038101906107aa9190615441565b612429565b6040516107bc9190615602565b60405180910390f35b3480156107d0575f80fd5b506107eb60048036038101906107e6919061597b565b612517565b005b3480156107f8575f80fd5b50610813600480360381019061080e9190615385565b612817565b6040516108209190614f24565b60405180910390f35b348015610834575f80fd5b5061083d612879565b60405161084a9190615255565b60405180910390f35b34801561085e575f80fd5b506108676128ae565b60405161087491906159d8565b60405180910390f35b348015610888575f80fd5b506108a3600480360381019061089e9190615441565b6128ca565b6040516108b09190614f24565b60405180910390f35b3480156108c4575f80fd5b506108cd61293c565b6040516108da9190614f24565b60405180910390f35b3480156108ee575f80fd5b506108f7612942565b6040516109049190615a43565b60405180910390f35b348015610918575f80fd5b5061092161297b565b60405161092e9190614f24565b60405180910390f35b348015610942575f80fd5b5061095d60048036038101906109589190615385565b612980565b60405161096a9190614f24565b60405180910390f35b34801561097e575f80fd5b5061099960048036038101906109949190615385565b6129e2565b005b3480156109a6575f80fd5b506109af612bdd565b6040516109bc9190614f24565b60405180910390f35b3480156109d0575f80fd5b506109eb60048036038101906109e69190615a63565b612be1565b005b610a076004803603810190610a029190615ae3565b612c65565b005b348015610a14575f80fd5b50610a2f6004803603810190610a2a9190615385565b613215565b604051610a3c9190615602565b60405180910390f35b348015610a50575f80fd5b50610a596132a7565b604051610a669190614f24565b60405180910390f35b348015610a7a575f80fd5b50610a956004803603810190610a909190615385565b6132af565b604051610aa29190614f24565b60405180910390f35b348015610ab6575f80fd5b50610ad16004803603810190610acc9190615385565b613311565b005b61080081565b60605f610af660055f8781526020019081526020015f20546134a0565b610100610b039190615b6d565b90505f8484905067ffffffffffffffff811115610b2357610b2261561f565b5b604051908082528060200260200182016040528015610b5c57816020015b610b49614e89565b815260200190600190039081610b415790505b5090505f5b85859050811015610bbb57610b9087878784818110610b8357610b82615ba0565b5b90506020020135856135c9565b828281518110610ba357610ba2615ba0565b5b60200260200101819052508080600101915050610b61565b5080925050509392505050565b5f610800838390501115610c11576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c0890615c17565b60405180910390fd5b5f610c1a6137d4565b905080341015610c5f576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c5690615c7f565b60405180910390fd5b610c68816137fb565b80341115610cc0573373ffffffffffffffffffffffffffffffffffffffff166108fc8234610c969190615b6d565b90811502906040515f60405180830381858888f19350505050158015610cbe573d5f803e3d5ffd5b505b5f60015f81819054906101000a900467ffffffffffffffff1680929190610ce690615c9d565b91906101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555067ffffffffffffffff1690505f60065f8381526020019081526020015f20819055505f60075f8381526020019081526020015f208190555033600b5f8381526020019081526020015f205f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508560085f8381526020019081526020015f205f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505f600d5f8381526020019081526020015f20819055505f73ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff1614610e98578573ffffffffffffffffffffffffffffffffffffffff166394d41b36823388886040518563ffffffff1660e01b8152600401610e6a9493929190615d08565b5f604051808303815f87803b158015610e81575f80fd5b505af1158015610e93573d5f803e3d5ffd5b505050505b3373ffffffffffffffffffffffffffffffffffffffff16817f017f0b33d96e8f9968590172013032c2346cf047787a5e17a44b0a1bb3cd0f0160405160405180910390a380925050509392505050565b73ff0000000000000000000000000000000000006381565b5f610800838390501115610f49576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610f4090615c17565b60405180910390fd5b610f5286613215565b610f91576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610f8890615d90565b60405180910390fd5b5f8585905011610fd6576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610fcd90615df8565b60405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff16600b5f8881526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614611074576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161106b90615e60565b60405180910390fd5b5f60055f8881526020019081526020015f205490505f8686905067ffffffffffffffff8111156110a7576110a661561f565b5b6040519080825280602002602001820160405280156110d55781602001602082028036833780820191505090505b5090505f5b878790508110156111845761114b89828a8a858181106110fd576110fc615ba0565b5b905060200281019061110f9190615e8a565b805f019061111d9190615eb1565b8b8b868181106111305761112f615ba0565b5b90506020028101906111429190615e8a565b602001356138fe565b5080836111589190615ed8565b82828151811061116b5761116a615ba0565b5b60200260200101818152505080806001019150506110da565b50877f5ce51a8003915c377679ba533d9dafa0792058b254965697e674272f13f4fdd3826040516111b5919061595b565b60405180910390a25f60085f8a81526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614611295578073ffffffffffffffffffffffffffffffffffffffff166312d5d66f8a858b8b8b8b6040518763ffffffff1660e01b81526004016112679695949392919061612e565b5f604051808303815f87803b15801561127e575f80fd5b505af1158015611290573d5f803e3d5ffd5b505050505b82935050505095945050505050565b73fe0000000000000000000000000000000000000681565b660400000000000081565b7f150ac9b959aee0051e4091f0ef5216d941f590e1c5e7f91cf7635b5c11628c0e5f1b81565b5f6112f782613215565b611336576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161132d90615d90565b60405180910390fd5b60085f8381526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050919050565b6108008282905011156113b6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016113ad90615c17565b60405180910390fd5b6113bf85613215565b6113fe576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016113f590615d90565b60405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff16600b5f8781526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161461149c576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611493906161f3565b60405180910390fd5b6107d0600a5f8781526020019081526020015f2080549050858590506114c29190615ed8565b1115611503576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016114fa90616281565b60405180910390fd5b5f5b848490508110156115d75760055f8781526020019081526020015f205485858381811061153557611534615ba0565b5b905060200201351061157c576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016115739061630f565b60405180910390fd5b600a5f8781526020019081526020015f208585838181106115a05761159f615ba0565b5b90506020020135908060018154018082558091505060019003905f5260205f20015f90919091909150558080600101915050611505565b505f60085f8781526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16146116ae578073ffffffffffffffffffffffffffffffffffffffff16634af7d1d287878787876040518663ffffffff1660e01b8152600401611680959493929190616395565b5f604051808303815f87803b158015611697575f80fd5b505af11580156116a9573d5f803e3d5ffd5b505050505b505050505050565b6116be614ea1565b6116c783613215565b611706576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016116fd90615d90565b60405180910390fd5b60025f8481526020019081526020015f205f8381526020019081526020015f206040518060200160405290815f8201805461174090616409565b80601f016020809104026020016040519081016040528092919081815260200182805461176c90616409565b80156117b75780601f1061178e576101008083540402835291602001916117b7565b820191905f5260205f20905b81548152906001019060200180831161179a57829003601f168201915b505050505081525050905092915050565b5f6117d282613215565b611811576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161180890615d90565b60405180910390fd5b60065f8381526020019081526020015f20549050919050565b5f805f73fe0000000000000000000000000000000000000673ffffffffffffffffffffffffffffffffffffffff16846040516020016118699190616459565b60405160208183030381529060405260405161188591906164ad565b5f60405180830381855afa9150503d805f81146118bd576040519150601f19603f3d011682016040523d82523d5f602084013e6118c2565b606091505b509150915081611907576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016118fe90616533565b60405180910390fd5b8080602001905181019061191b9190616565565b92505050919050565b61080082829050111561196c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161196390615c17565b60405180910390fd5b600b5f8581526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614611a0a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611a0190616600565b60405180910390fd5b5f60065f8681526020019081526020015f205411611a5d576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611a549061668e565b60405180910390fd5b5f600d5f8681526020019081526020015f205403611a8c5743600d5f8681526020019081526020015f20819055505b5f600a5f8681526020019081526020015f2090505f818054905067ffffffffffffffff811115611abf57611abe61561f565b5b604051908082528060200260200182016040528015611aed5781602001602082028036833780820191505090505b5090505f5b8151811015611b77578260018480549050611b0d9190615b6d565b81548110611b1e57611b1d615ba0565b5b905f5260205f200154828281518110611b3a57611b39615ba0565b5b60200260200101818152505082805480611b5757611b566166ac565b5b600190038181905f5260205f20015f905590558080600101915050611af2565b50611b828682613aab565b857fd22bb0ee05b8ca92312459c76223d3b9bc1bd96fb6c9b18e637ededf92d8117482604051611bb2919061595b565b60405180910390a260065f8781526020019081526020015f205460095f8881526020019081526020015f20819055505f5443611bee9190615ed8565b851015611c30576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611c279061676f565b60405180910390fd5b8460075f8881526020019081526020015f20819055505f60065f8881526020019081526020015f205403611cb857857f323c29bc8d678a5d987b90a321982d10b9a91bcad071a9e445879497bf0e68e760405160405180910390a25f600d5f8881526020019081526020015f20819055505f60075f8881526020019081526020015f20819055505b5f60085f8881526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614611db0578073ffffffffffffffffffffffffffffffffffffffff1663aa27ebcc8860075f8b81526020019081526020015f205460065f8c81526020019081526020015f205489896040518663ffffffff1660e01b8152600401611d8295949392919061678d565b5f604051808303815f87803b158015611d99575f80fd5b505af1158015611dab573d5f803e3d5ffd5b505050505b867fc099ffec4e3e773644a4d1dda368c46af853a0eeb15babde217f53a657396e1e8760065f8b81526020019081526020015f2054604051611df39291906167d9565b60405180910390a250505050505050565b5f81565b5f80611e1383613215565b611e52576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611e4990615d90565b60405180910390fd5b600b5f8481526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600c5f8581526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1691509150915091565b5f611ec783613215565b8015611ee3575060055f8481526020019081526020015f205482105b8015611f0e57505f60035f8581526020019081526020015f205f8481526020019081526020015f2054115b905092915050565b5f8060095f8581526020019081526020015f20546020611f369190616800565b90505f80611f42611f99565b91509150611f6f85838386600d5f8c81526020019081526020015f205443611f6a9190615b6d565b613b6a565b935050505092915050565b611f82613de7565b611f8b82613ecd565b611f958282613ed8565b5050565b5f805f73a2aa501b19aff244d90cc15a4cf739d2725b572973ffffffffffffffffffffffffffffffffffffffff1663a4ae35e07f150ac9b959aee0051e4091f0ef5216d941f590e1c5e7f91cf7635b5c11628c0e5f1b620151806040518363ffffffff1660e01b8152600401612010929190616841565b608060405180830381865afa15801561202b573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061204f919061696b565b90505f815f015160070b13612099576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161209090616a06565b60405180910390fd5b805f0151816040015192509250509091565b5f6120b4613ff6565b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5f1b905090565b6201518081565b73a2aa501b19aff244d90cc15a4cf739d2725b572981565b5f61210582613215565b612144576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161213b90615d90565b60405180910390fd5b60075f8381526020019081526020015f20549050919050565b61216682613215565b6121a5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161219c90615d90565b60405180910390fd5b5f600b5f8481526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690503373ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614612247576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161223e90616a94565b60405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16036122b257600c5f8481526020019081526020015f205f6101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055612302565b81600c5f8581526020019081526020015f205f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b505050565b606061231282613215565b612351576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161234890615d90565b60405180910390fd5b5f600a5f8481526020019081526020015f2090505f818054905067ffffffffffffffff8111156123845761238361561f565b5b6040519080825280602002602001820160405280156123b25781602001602082028036833780820191505090505b5090505f5b828054905081101561240b578281815481106123d6576123d5615ba0565b5b905f5260205f2001548282815181106123f2576123f1615ba0565b5b60200260200101818152505080806001019150506123b7565b508092505050919050565b61241e61407d565b6124275f614104565b565b5f8061244560055f8681526020019081526020015f20546134a0565b6101006124529190615b6d565b90505f61247d85600160095f8981526020019081526020015f20546124779190615b6d565b846135c9565b9050600160035f8781526020019081526020015f205f835f015181526020019081526020015f20546124af9190615b6d565b8160200151146124f4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016124eb90616b22565b60405180910390fd5b6124fe8585611ebd565b801561250d5750805f01518411155b9250505092915050565b61080082829050111561255f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161255690615c17565b60405180910390fd5b60015f9054906101000a900467ffffffffffffffff1667ffffffffffffffff1683106125c0576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016125b790616b8a565b60405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff16600b5f8581526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161461265e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161265590616c18565b60405180910390fd5b5f60065f8581526020019081526020015f205490505f60065f8681526020019081526020015f20819055505f600b5f8681526020019081526020015f205f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505f60075f8681526020019081526020015f20819055505f600d5f8681526020019081526020015f20819055505f60085f8681526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16146127d8578073ffffffffffffffffffffffffffffffffffffffff166326c249e3868487876040518563ffffffff1660e01b81526004016127aa9493929190616c36565b5f604051808303815f87803b1580156127c1575f80fd5b505af11580156127d3573d5f803e3d5ffd5b505050505b847f589e9a441b5bddda77c4ab647b0108764a9cc1a7f655aa9b7bc50b8bdfab8673836040516128089190614f24565b60405180910390a25050505050565b5f61282182613215565b612860576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161285790615d90565b60405180910390fd5b60095f8381526020019081526020015f20549050919050565b5f806128836141d5565b9050805f015f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1691505090565b5f60015f9054906101000a900467ffffffffffffffff16905090565b5f6128d483613215565b612913576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161290a90615d90565b60405180910390fd5b60035f8481526020019081526020015f205f8381526020019081526020015f2054905092915050565b6107d081565b6040518060400160405280600581526020017f352e302e3000000000000000000000000000000000000000000000000000000081525081565b602081565b5f61298a82613215565b6129c9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016129c090615d90565b60405180910390fd5b60055f8381526020019081526020015f20549050919050565b6129eb81613215565b612a2a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612a2190615d90565b60405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff16600c5f8381526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614612ac8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612abf90616ce4565b60405180910390fd5b5f600b5f8381526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905033600b5f8481526020019081526020015f205f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550600c5f8381526020019081526020015f205f6101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690553373ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16837fd3273037b635678293ef0c076bd77af13760e75e12806d1db237616d03c3a76660405160405180910390a45050565b5f81565b612be961407d565b5f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603612c59575f6040517f1e4fbdf7000000000000000000000000000000000000000000000000000000008152600401612c509190615255565b60405180910390fd5b612c6281614104565b50565b5f5a9050600b5f8581526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614612d07576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612cfe90616600565b60405180910390fd5b5f60075f8681526020019081526020015f2054905080431015612d5f576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612d5690616d4c565b60405180910390fd5b5f8484905011612da4576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612d9b90616db4565b60405180910390fd5b5f8103612de6576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612ddd90616e1c565b60405180910390fd5b5f8484905067ffffffffffffffff811115612e0457612e0361561f565b5b604051908082528060200260200182016040528015612e3d57816020015b612e2a614e89565b815260200190600190039081612e225790505b5090505f612e4a876141fc565b90505f60095f8981526020019081526020015f205490505f612e7c60055f8b81526020019081526020015f20546134a0565b610100612e899190615b6d565b90505f5b888890508167ffffffffffffffff16101561308b575f848b83604051602001612eb893929190616e6e565b60405160208183030381529060405290505f8482805190602001205f1c612edf9190616ed7565b9050612eec8c82866135c9565b878467ffffffffffffffff1681518110612f0957612f08615ba0565b5b60200260200101819052505f612f4e612f498e8a8767ffffffffffffffff1681518110612f3957612f38615ba0565b5b60200260200101515f01516116b6565b61421e565b90505f6130328d8d8767ffffffffffffffff16818110612f7157612f70615ba0565b5b9050602002810190612f839190616f07565b8060200190612f929190616f2e565b808060200260200160405190810160405280939291908181526020018383602002808284375f81840152601f19601f82011690508083019250505050505050838f8f8967ffffffffffffffff16818110612fef57612fee615ba0565b5b90506020028101906130019190616f07565b5f01358c8967ffffffffffffffff168151811061302157613020615ba0565b5b602002602001015160200151614360565b905080613074576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161306b90616fda565b60405180910390fd5b50505050808061308390615c9d565b915050612e8d565b505f610514602061309c8b8b614378565b6130a69190615ed8565b6130b09190616800565b5a886130bc9190615b6d565b6130c69190615ed8565b90506130d28a826143fd565b5f60085f8c81526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16146131ba578073ffffffffffffffffffffffffffffffffffffffff1663356de02b8c60065f8f81526020019081526020015f2054888e8e90506040518563ffffffff1660e01b815260040161318c9493929190616ff8565b5f604051808303815f87803b1580156131a3575f80fd5b505af11580156131b5573d5f803e3d5ffd5b505050505b43600d5f8d81526020019081526020015f20819055508a7f1acf7df9f0c1b0208c23be6178950c0273f89b766805a2c0bd1e53d25c700e5087604051613200919061511a565b60405180910390a25050505050505050505050565b5f60015f9054906101000a900467ffffffffffffffff1667ffffffffffffffff16821080156132a057505f73ffffffffffffffffffffffffffffffffffffffff16600b5f8481526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614155b9050919050565b5f8054905090565b5f6132b982613215565b6132f8576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016132ef90615d90565b60405180910390fd5b600d5f8381526020019081526020015f20549050919050565b5f61331a61450d565b90505f815f0160089054906101000a900460ff161590505f825f015f9054906101000a900467ffffffffffffffff1690505f808267ffffffffffffffff161480156133625750825b90505f60018367ffffffffffffffff1614801561339557505f3073ffffffffffffffffffffffffffffffffffffffff163b145b9050811580156133a3575080155b156133da576040517ff92ee8a900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001855f015f6101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055508315613427576001855f0160086101000a81548160ff0219169083151502179055505b61343033614534565b613438614548565b855f819055508315613498575f855f0160086101000a81548160ff0219169083151502179055507fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2600160405161348f9190617074565b60405180910390a15b505050505050565b5f8061010090505f608084901c90505f81146134c9576080826134c39190615b6d565b91508093505b604084901c90505f81146134ea576040826134e49190615b6d565b91508093505b602084901c90505f811461350b576020826135059190615b6d565b91508093505b601084901c90505f811461352c576010826135269190615b6d565b91508093505b600884901c90505f811461354d576008826135479190615b6d565b91508093505b600484901c90505f811461356e576004826135689190615b6d565b91508093505b600284901c90505f811461358f576002826135899190615b6d565b91508093505b600184901c90505f81146135b3576002826135aa9190615b6d565b925050506135c4565b83826135bf9190615b6d565b925050505b919050565b6135d1614e89565b60065f8581526020019081526020015f20548310613624576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161361b906170d7565b60405180910390fd5b5f6001836001901b6136369190615b6d565b90505f80808590505b5f81111561373b5760055f8981526020019081526020015f205484106136825760018161366c9190615b6d565b6001901b8461367b9190615b6d565b9350613728565b60045f8981526020019081526020015f205f8581526020019081526020015f2054836136ae9190615ed8565b91508682116137085760045f8981526020019081526020015f205f8581526020019081526020015f2054836136e39190615ed8565b92506001816136f29190615b6d565b6001901b846137019190615ed8565b9350613727565b6001816137159190615b6d565b6001901b846137249190615b6d565b93505b5b8080613733906170f5565b91505061363f565b5060045f8881526020019081526020015f205f8481526020019081526020015f2054826137689190615ed8565b90508581116137a75760405180604001604052806001856137899190615ed8565b8152602001828861379a9190615b6d565b81525093505050506137cd565b604051806040016040528084815260200183886137c49190615b6d565b81525093505050505b9392505050565b5f600a6001670de0b6b3a76400006137ec9190616800565b6137f6919061711c565b905090565b8034101561383e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161383590617196565b60405180910390fd5b5f73ff0000000000000000000000000000000000006373ffffffffffffffffffffffffffffffffffffffff1682604051613877906171d7565b5f6040518083038185875af1925050503d805f81146138b1576040519150601f19603f3d011682016040523d82523d5f602084013e6138b6565b606091505b50509050806138fa576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016138f190617235565b60405180910390fd5b5050565b5f8060208361390d9190616ed7565b1461394f57836040517fc7b67cf3000000000000000000000000000000000000000000000000000000008152600401613946919061729d565b60405180910390fd5b5f820361399357836040517fc7b67cf300000000000000000000000000000000000000000000000000000000815260040161398a9190617313565b60405180910390fd5b66040000000000008211156139df57836040517fc7b67cf30000000000000000000000000000000000000000000000000000000081526004016139d69190617389565b60405180910390fd5b5f6020836139ed919061711c565b90505f60055f8881526020019081526020015f205f815480929190613a11906173b5565b919050559050613a22878383614552565b8460025f8981526020019081526020015f205f8381526020019081526020015f208181613a4f91906176fc565b9050508160035f8981526020019081526020015f205f8381526020019081526020015f20819055508160065f8981526020019081526020015f205f828254613a979190615ed8565b925050819055508092505050949350505050565b613ab482613215565b613af3576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401613aea90615d90565b60405180910390fd5b5f805b8251811015613b3d57613b2384848381518110613b1657613b15615ba0565b5b60200260200101516145ed565b82613b2e9190615ed8565b91508080600101915050613af6565b508060065f8581526020019081526020015f205f828254613b5e9190615b6d565b92505081905550505050565b5f80861180613b7857505f48145b613bb7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401613bae9061777a565b60405180910390fd5b5f8567ffffffffffffffff1611613c03576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401613bfa90617808565b60405180910390fd5b5f8311613c45576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401613c3c90617896565b60405180910390fd5b5f808560030b12613cc35784600a613c5d91906179f2565b8667ffffffffffffffff166201518065010000000000613c7d9190616800565b613c879190616800565b613c919190616800565b6001670de0b6b3a7640000613ca69190616800565b6002613cb29190616800565b613cbc919061711c565b9050613d3b565b8567ffffffffffffffff166201518065010000000000613ce39190616800565b613ced9190616800565b85613cf790617a3c565b600a613d0391906179f2565b6001670de0b6b3a7640000613d189190616800565b6002613d249190616800565b613d2e9190616800565b613d38919061711c565b90505b5f848483613d499190616800565b613d539190616800565b90505f6064600583613d659190616800565b613d6f919061711c565b90505f6064600484613d819190616800565b613d8b919061711c565b9050818a10613da0575f945050505050613dde565b808a10613dbe578982613db39190615b6d565b945050505050613dde565b6064600184613dcd9190616800565b613dd7919061711c565b9450505050505b95945050505050565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff161480613e9457507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16613e7b61467b565b73ffffffffffffffffffffffffffffffffffffffff1614155b15613ecb576040517fe07c8dba00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b613ed561407d565b50565b8173ffffffffffffffffffffffffffffffffffffffff166352d1902d6040518163ffffffff1660e01b8152600401602060405180830381865afa925050508015613f4057506040513d601f19601f82011682018060405250810190613f3d9190617aac565b60015b613f8157816040517f4c9c8ce3000000000000000000000000000000000000000000000000000000008152600401613f789190615255565b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5f1b8114613fe757806040517faa1d49a4000000000000000000000000000000000000000000000000000000008152600401613fde919061536c565b60405180910390fd5b613ff183836146ce565b505050565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff161461407b576040517fe07c8dba00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b614085614740565b73ffffffffffffffffffffffffffffffffffffffff166140a3612879565b73ffffffffffffffffffffffffffffffffffffffff1614614102576140c6614740565b6040517f118cdaa70000000000000000000000000000000000000000000000000000000081526004016140f99190615255565b60405180910390fd5b565b5f61410d6141d5565b90505f815f015f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905082825f015f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508273ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3505050565b5f7f9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300905090565b5f61421760075f8481526020019081526020015f205461182a565b9050919050565b5f6020825f0151511015614267576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161425e90617b21565b60405180910390fd5b5f602067ffffffffffffffff8111156142835761428261561f565b5b6040519080825280601f01601f1916602001820160405280156142b55781602001600182028036833780820191505090505b5090505f5b602081101561434d57835f0151816020865f0151516142d99190615b6d565b6142e39190615ed8565b815181106142f4576142f3615ba0565b5b602001015160f81c60f81b82828151811061431257614311615ba0565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a90535080806001019150506142ba565b508061435890617b62565b915050919050565b5f8361436d868585614747565b149050949350505050565b5f805f90505f5b848490508110156143f257602085858381811061439f5761439e615ba0565b5b90506020028101906143b19190616f07565b80602001906143c09190616f2e565b90506143cc9190616800565b60406143d89190615ed8565b826143e39190615ed8565b9150808060010191505061437f565b508091505092915050565b5f488261440a9190616800565b90505f60095f8581526020019081526020015f2054602061442b9190616800565b90505f80614437611f99565b915091505f61446585848487600d5f8d81526020019081526020015f2054436144609190615b6d565b613b6a565b9050614470816137fb565b803411156144c8573373ffffffffffffffffffffffffffffffffffffffff166108fc823461449e9190615b6d565b90811502906040515f60405180830381858888f193505050501580156144c6573d5f803e3d5ffd5b505b867f928bbf5188022bf8b9a0e59f5e81e179d0a4c729bdba2856ac971af2063fbf2b8285856040516144fc93929190617bc8565b60405180910390a250505050505050565b5f7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00905090565b61453c6147cf565b6145458161480f565b50565b6145506147cf565b565b5f8190505f61456082614893565b90505f8490505f5b828110156145bf575f816001901b856145819190615b6d565b905060045f8981526020019081526020015f205f8281526020019081526020015f2054836145af9190615ed8565b9250508080600101915050614568565b508060045f8881526020019081526020015f205f8681526020019081526020015f2081905550505050505050565b5f8060035f8581526020019081526020015f205f8481526020019081526020015f2054905061461d8484836148b0565b60035f8581526020019081526020015f205f8481526020019081526020015f205f905560025f8581526020019081526020015f205f8481526020019081526020015f205f8082015f61466f9190614eb4565b50508091505092915050565b5f6146a77f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5f1b61496b565b5f015f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b6146d782614974565b8173ffffffffffffffffffffffffffffffffffffffff167fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b60405160405180910390a25f815111156147335761472d8282614a3d565b5061473c565b61473b614abd565b5b5050565b5f33905090565b5f808390505f5b85518110156147c3575f86828151811061476b5761476a615ba0565b5b602002602001015190505f6002866147839190616ed7565b03614799576147928382614af9565b92506147a6565b6147a38184614af9565b92505b6002856147b3919061711c565b945050808060010191505061474e565b50809150509392505050565b6147d7614b0c565b61480d576040517fd7e6bcf800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b6148176147cf565b5f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603614887575f6040517f1e4fbdf700000000000000000000000000000000000000000000000000000000815260040161487e9190615255565b60405180910390fd5b61489081614104565b50565b5f6148a96001836148a49190615ed8565b614b2a565b9050919050565b5f6148cb60055f8681526020019081526020015f20546134a0565b6101006148d89190615b6d565b90505f6148e484614893565b90505b818111158015614907575060055f8681526020019081526020015f205484105b15614964578260045f8781526020019081526020015f205f8681526020019081526020015f205f82825461493b9190615b6d565b92505081905550806001901b846149529190615ed8565b935061495d84614893565b90506148e7565b5050505050565b5f819050919050565b5f8173ffffffffffffffffffffffffffffffffffffffff163b036149cf57806040517f4c9c8ce30000000000000000000000000000000000000000000000000000000081526004016149c69190615255565b60405180910390fd5b806149fb7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5f1b61496b565b5f015f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b60605f808473ffffffffffffffffffffffffffffffffffffffff1684604051614a6691906164ad565b5f60405180830381855af49150503d805f8114614a9e576040519150601f19603f3d011682016040523d82523d5f602084013e614aa3565b606091505b5091509150614ab3858383614d6f565b9250505092915050565b5f341115614af7576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b5f614b048383614dfc565b905092915050565b5f614b1561450d565b5f0160089054906101000a900460ff16905090565b5f7f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821115614b8e576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401614b8590617c6d565b60405180910390fd5b5f61010090505f83614b9f90617c94565b905083811690505f8114614bbc578180614bb8906170f5565b9250505b5f6fffffffffffffffffffffffffffffffff821614614be557608082614be29190615b6d565b91505b5f77ffffffffffffffff0000000000000000ffffffffffffffff821614614c1657604082614c139190615b6d565b91505b5f7bffffffff00000000ffffffff00000000ffffffff00000000ffffffff821614614c4b57602082614c489190615b6d565b91505b5f7dffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff821614614c8257601082614c7f9190615b6d565b91505b5f7eff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff821614614cba57600882614cb79190615b6d565b91505b5f7f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f821614614cf357600482614cf09190615b6d565b91505b5f7f3333333333333333333333333333333333333333333333333333333333333333821614614d2c57600282614d299190615b6d565b91505b5f7f5555555555555555555555555555555555555555555555555555555555555555821614614d6557600182614d629190615b6d565b91505b8192505050919050565b606082614d8457614d7f82614e45565b614df4565b5f8251148015614daa57505f8473ffffffffffffffffffffffffffffffffffffffff163b145b15614dec57836040517f9996b315000000000000000000000000000000000000000000000000000000008152600401614de39190615255565b60405180910390fd5b819050614df5565b5b9392505050565b5f825f528160205260205f60405f60025afa614e16575f80fd5b5f5190507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3f8116905092915050565b5f81511115614e575780518082602001fd5b6040517fd6bda27500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60405180604001604052805f81526020015f81525090565b6040518060200160405280606081525090565b508054614ec090616409565b5f825580601f10614ed15750614eee565b601f0160209004905f5260205f2090810190614eed9190614ef1565b5b50565b5b80821115614f08575f815f905550600101614ef2565b5090565b5f819050919050565b614f1e81614f0c565b82525050565b5f602082019050614f375f830184614f15565b92915050565b5f604051905090565b5f80fd5b5f80fd5b614f5781614f0c565b8114614f61575f80fd5b50565b5f81359050614f7281614f4e565b92915050565b5f80fd5b5f80fd5b5f80fd5b5f8083601f840112614f9957614f98614f78565b5b8235905067ffffffffffffffff811115614fb657614fb5614f7c565b5b602083019150836020820283011115614fd257614fd1614f80565b5b9250929050565b5f805f60408486031215614ff057614fef614f46565b5b5f614ffd86828701614f64565b935050602084013567ffffffffffffffff81111561501e5761501d614f4a565b5b61502a86828701614f84565b92509250509250925092565b5f81519050919050565b5f82825260208201905092915050565b5f819050602082019050919050565b61506881614f0c565b82525050565b604082015f8201516150825f85018261505f565b506020820151615095602085018261505f565b50505050565b5f6150a6838361506e565b60408301905092915050565b5f602082019050919050565b5f6150c882615036565b6150d28185615040565b93506150dd83615050565b805f5b8381101561510d5781516150f4888261509b565b97506150ff836150b2565b9250506001810190506150e0565b5085935050505092915050565b5f6020820190508181035f83015261513281846150be565b905092915050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6151638261513a565b9050919050565b61517381615159565b811461517d575f80fd5b50565b5f8135905061518e8161516a565b92915050565b5f8083601f8401126151a9576151a8614f78565b5b8235905067ffffffffffffffff8111156151c6576151c5614f7c565b5b6020830191508360018202830111156151e2576151e1614f80565b5b9250929050565b5f805f60408486031215615200576151ff614f46565b5b5f61520d86828701615180565b935050602084013567ffffffffffffffff81111561522e5761522d614f4a565b5b61523a86828701615194565b92509250509250925092565b61524f81615159565b82525050565b5f6020820190506152685f830184615246565b92915050565b5f8083601f84011261528357615282614f78565b5b8235905067ffffffffffffffff8111156152a05761529f614f7c565b5b6020830191508360208202830111156152bc576152bb614f80565b5b9250929050565b5f805f805f606086880312156152dc576152db614f46565b5b5f6152e988828901614f64565b955050602086013567ffffffffffffffff81111561530a57615309614f4a565b5b6153168882890161526e565b9450945050604086013567ffffffffffffffff81111561533957615338614f4a565b5b61534588828901615194565b92509250509295509295909350565b5f819050919050565b61536681615354565b82525050565b5f60208201905061537f5f83018461535d565b92915050565b5f6020828403121561539a57615399614f46565b5b5f6153a784828501614f64565b91505092915050565b5f805f805f606086880312156153c9576153c8614f46565b5b5f6153d688828901614f64565b955050602086013567ffffffffffffffff8111156153f7576153f6614f4a565b5b61540388828901614f84565b9450945050604086013567ffffffffffffffff81111561542657615425614f4a565b5b61543288828901615194565b92509250509295509295909350565b5f806040838503121561545757615456614f46565b5b5f61546485828601614f64565b925050602061547585828601614f64565b9150509250929050565b5f81519050919050565b5f82825260208201905092915050565b5f5b838110156154b657808201518184015260208101905061549b565b5f8484015250505050565b5f601f19601f8301169050919050565b5f6154db8261547f565b6154e58185615489565b93506154f5818560208601615499565b6154fe816154c1565b840191505092915050565b5f602083015f8301518482035f86015261552382826154d1565b9150508091505092915050565b5f6020820190508181035f8301526155488184615509565b905092915050565b5f805f806060858703121561556857615567614f46565b5b5f61557587828801614f64565b945050602061558687828801614f64565b935050604085013567ffffffffffffffff8111156155a7576155a6614f4a565b5b6155b387828801615194565b925092505092959194509250565b5f6040820190506155d45f830185615246565b6155e16020830184615246565b9392505050565b5f8115159050919050565b6155fc816155e8565b82525050565b5f6020820190506156155f8301846155f3565b92915050565b5f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b615655826154c1565b810181811067ffffffffffffffff821117156156745761567361561f565b5b80604052505050565b5f615686614f3d565b9050615692828261564c565b919050565b5f67ffffffffffffffff8211156156b1576156b061561f565b5b6156ba826154c1565b9050602081019050919050565b828183375f83830152505050565b5f6156e76156e284615697565b61567d565b9050828152602081018484840111156157035761570261561b565b5b61570e8482856156c7565b509392505050565b5f82601f83011261572a57615729614f78565b5b813561573a8482602086016156d5565b91505092915050565b5f806040838503121561575957615758614f46565b5b5f61576685828601615180565b925050602083013567ffffffffffffffff81111561578757615786614f4a565b5b61579385828601615716565b9150509250929050565b5f67ffffffffffffffff82169050919050565b6157b98161579d565b82525050565b5f8160030b9050919050565b6157d4816157bf565b82525050565b5f6040820190506157ed5f8301856157b0565b6157fa60208301846157cb565b9392505050565b5f819050919050565b5f61582461581f61581a8461513a565b615801565b61513a565b9050919050565b5f6158358261580a565b9050919050565b5f6158468261582b565b9050919050565b6158568161583c565b82525050565b5f60208201905061586f5f83018461584d565b92915050565b5f806040838503121561588b5761588a614f46565b5b5f61589885828601614f64565b92505060206158a985828601615180565b9150509250929050565b5f81519050919050565b5f82825260208201905092915050565b5f819050602082019050919050565b5f6158e7838361505f565b60208301905092915050565b5f602082019050919050565b5f615909826158b3565b61591381856158bd565b935061591e836158cd565b805f5b8381101561594e57815161593588826158dc565b9750615940836158f3565b925050600181019050615921565b5085935050505092915050565b5f6020820190508181035f83015261597381846158ff565b905092915050565b5f805f6040848603121561599257615991614f46565b5b5f61599f86828701614f64565b935050602084013567ffffffffffffffff8111156159c0576159bf614f4a565b5b6159cc86828701615194565b92509250509250925092565b5f6020820190506159eb5f8301846157b0565b92915050565b5f81519050919050565b5f82825260208201905092915050565b5f615a15826159f1565b615a1f81856159fb565b9350615a2f818560208601615499565b615a38816154c1565b840191505092915050565b5f6020820190508181035f830152615a5b8184615a0b565b905092915050565b5f60208284031215615a7857615a77614f46565b5b5f615a8584828501615180565b91505092915050565b5f8083601f840112615aa357615aa2614f78565b5b8235905067ffffffffffffffff811115615ac057615abf614f7c565b5b602083019150836020820283011115615adc57615adb614f80565b5b9250929050565b5f805f60408486031215615afa57615af9614f46565b5b5f615b0786828701614f64565b935050602084013567ffffffffffffffff811115615b2857615b27614f4a565b5b615b3486828701615a8e565b92509250509250925092565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f615b7782614f0c565b9150615b8283614f0c565b9250828203905081811115615b9a57615b99615b40565b5b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b7f4578747261206461746120746f6f206c617267650000000000000000000000005f82015250565b5f615c016014836159fb565b9150615c0c82615bcd565b602082019050919050565b5f6020820190508181035f830152615c2e81615bf5565b9050919050565b7f737962696c20666565206e6f74206d65740000000000000000000000000000005f82015250565b5f615c696011836159fb565b9150615c7482615c35565b602082019050919050565b5f6020820190508181035f830152615c9681615c5d565b9050919050565b5f615ca78261579d565b915067ffffffffffffffff8203615cc157615cc0615b40565b5b600182019050919050565b5f82825260208201905092915050565b5f615ce78385615ccc565b9350615cf48385846156c7565b615cfd836154c1565b840190509392505050565b5f606082019050615d1b5f830187614f15565b615d286020830186615246565b8181036040830152615d3b818486615cdc565b905095945050505050565b7f50726f6f6620736574206e6f74206c69766500000000000000000000000000005f82015250565b5f615d7a6012836159fb565b9150615d8582615d46565b602082019050919050565b5f6020820190508181035f830152615da781615d6e565b9050919050565b7f4d75737420616464206174206c65617374206f6e6520726f6f740000000000005f82015250565b5f615de2601a836159fb565b9150615ded82615dae565b602082019050919050565b5f6020820190508181035f830152615e0f81615dd6565b9050919050565b7f4f6e6c7920746865206f776e65722063616e2061646420726f6f7473000000005f82015250565b5f615e4a601c836159fb565b9150615e5582615e16565b602082019050919050565b5f6020820190508181035f830152615e7781615e3e565b9050919050565b5f80fd5b5f80fd5b5f80fd5b5f82356001604003833603038112615ea557615ea4615e7e565b5b80830191505092915050565b5f82356001602003833603038112615ecc57615ecb615e7e565b5b80830191505092915050565b5f615ee282614f0c565b9150615eed83614f0c565b9250828201905080821115615f0557615f04615b40565b5b92915050565b5f82825260208201905092915050565b5f819050919050565b5f80fd5b5f82356001602003833603038112615f4357615f42615f24565b5b82810191505092915050565b5f80fd5b5f80fd5b5f8083356001602003843603038112615f7357615f72615f24565b5b83810192508235915060208301925067ffffffffffffffff821115615f9b57615f9a615f4f565b5b600182023603831315615fb157615fb0615f53565b5b509250929050565b5f615fc48385615489565b9350615fd18385846156c7565b615fda836154c1565b840190509392505050565b5f60208301615ff65f840184615f57565b8583035f870152616008838284615fb9565b925050508091505092915050565b5f6160246020840184614f64565b905092915050565b5f6040830161603d5f840184615f28565b8482035f86015261604e8282615fe5565b91505061605e6020840184616016565b61606b602086018261505f565b508091505092915050565b5f616081838361602c565b905092915050565b5f823560016040038336030381126160a4576160a3615f24565b5b82810191505092915050565b5f602082019050919050565b5f6160c78385615f0b565b9350836020840285016160d984615f1b565b805f5b8781101561611c5784840389526160f38284616089565b6160fd8582616076565b9450616108836160b0565b925060208a019950506001810190506160dc565b50829750879450505050509392505050565b5f6080820190506161415f830189614f15565b61614e6020830188614f15565b81810360408301526161618186886160bc565b90508181036060830152616176818486615cdc565b9050979650505050505050565b7f4f6e6c7920746865206f776e65722063616e207363686564756c652072656d6f5f8201527f76616c206f6620726f6f74730000000000000000000000000000000000000000602082015250565b5f6161dd602c836159fb565b91506161e882616183565b604082019050919050565b5f6020820190508181035f83015261620a816161d1565b9050919050565b7f546f6f206d616e792072656d6f76616c73207761697420666f72206e657874205f8201527f70726f76696e6720706572696f6420746f207363686564756c65000000000000602082015250565b5f61626b603a836159fb565b915061627682616211565b604082019050919050565b5f6020820190508181035f8301526162988161625f565b9050919050565b7f43616e206f6e6c79207363686564756c652072656d6f76616c206f66206578695f8201527f7374696e6720726f6f7473000000000000000000000000000000000000000000602082015250565b5f6162f9602b836159fb565b91506163048261629f565b604082019050919050565b5f6020820190508181035f830152616326816162ed565b9050919050565b5f80fd5b82818337505050565b5f61634583856158bd565b93507f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8311156163785761637761632d565b5b602083029250616389838584616331565b82840190509392505050565b5f6060820190506163a85f830188614f15565b81810360208301526163bb81868861633a565b905081810360408301526163d0818486615cdc565b90509695505050505050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602260045260245ffd5b5f600282049050600182168061642057607f821691505b602082108103616433576164326163dc565b5b50919050565b5f819050919050565b61645361644e82614f0c565b616439565b82525050565b5f6164648284616442565b60208201915081905092915050565b5f81905092915050565b5f6164878261547f565b6164918185616473565b93506164a1818560208601615499565b80840191505092915050565b5f6164b8828461647d565b915081905092915050565b7f52616e646f6d6e65737320707265636f6d70696c652063616c6c206661696c655f8201527f6400000000000000000000000000000000000000000000000000000000000000602082015250565b5f61651d6021836159fb565b9150616528826164c3565b604082019050919050565b5f6020820190508181035f83015261654a81616511565b9050919050565b5f8151905061655f81614f4e565b92915050565b5f6020828403121561657a57616579614f46565b5b5f61658784828501616551565b91505092915050565b7f6f6e6c7920746865206f776e65722063616e206d6f766520746f206e657874205f8201527f70726f76696e6720706572696f64000000000000000000000000000000000000602082015250565b5f6165ea602e836159fb565b91506165f582616590565b604082019050919050565b5f6020820190508181035f830152616617816165de565b9050919050565b7f63616e206f6e6c792073746172742070726f76696e67206f6e6365206c6561765f8201527f6573206172652061646465640000000000000000000000000000000000000000602082015250565b5f616678602c836159fb565b91506166838261661e565b604082019050919050565b5f6020820190508181035f8301526166a58161666c565b9050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603160045260245ffd5b7f6368616c6c656e67652065706f6368206d757374206265206174206c656173745f8201527f206368616c6c656e676546696e616c6974792065706f63687320696e2074686560208201527f2066757475726500000000000000000000000000000000000000000000000000604082015250565b5f6167596047836159fb565b9150616764826166d9565b606082019050919050565b5f6020820190508181035f8301526167868161674d565b9050919050565b5f6080820190506167a05f830188614f15565b6167ad6020830187614f15565b6167ba6040830186614f15565b81810360608301526167cd818486615cdc565b90509695505050505050565b5f6040820190506167ec5f830185614f15565b6167f96020830184614f15565b9392505050565b5f61680a82614f0c565b915061681583614f0c565b925082820261682381614f0c565b9150828204841483151761683a57616839615b40565b5b5092915050565b5f6040820190506168545f83018561535d565b6168616020830184614f15565b9392505050565b5f80fd5b5f8160070b9050919050565b6168818161686c565b811461688b575f80fd5b50565b5f8151905061689c81616878565b92915050565b6168ab8161579d565b81146168b5575f80fd5b50565b5f815190506168c6816168a2565b92915050565b6168d5816157bf565b81146168df575f80fd5b50565b5f815190506168f0816168cc565b92915050565b5f6080828403121561690b5761690a616868565b5b616915608061567d565b90505f6169248482850161688e565b5f830152506020616937848285016168b8565b602083015250604061694b848285016168e2565b604083015250606061695f84828501616551565b60608301525092915050565b5f608082840312156169805761697f614f46565b5b5f61698d848285016168f6565b91505092915050565b7f6661696c656420746f2076616c69646174653a207072696365206d75737420625f8201527f652067726561746572207468616e203000000000000000000000000000000000602082015250565b5f6169f06030836159fb565b91506169fb82616996565b604082019050919050565b5f6020820190508181035f830152616a1d816169e4565b9050919050565b7f4f6e6c79207468652063757272656e74206f776e65722063616e2070726f706f5f8201527f73652061206e6577206f776e6572000000000000000000000000000000000000602082015250565b5f616a7e602e836159fb565b9150616a8982616a24565b604082019050919050565b5f6020820190508181035f830152616aab81616a72565b9050919050565b7f6368616c6c656e676552616e6765202d312073686f756c6420616c69676e20775f8201527f697468207468652076657279206c617374206c656166206f66206120726f6f74602082015250565b5f616b0c6040836159fb565b9150616b1782616ab2565b604082019050919050565b5f6020820190508181035f830152616b3981616b00565b9050919050565b7f70726f6f6620736574206964206f7574206f6620626f756e64730000000000005f82015250565b5f616b74601a836159fb565b9150616b7f82616b40565b602082019050919050565b5f6020820190508181035f830152616ba181616b68565b9050919050565b7f4f6e6c7920746865206f776e65722063616e2064656c6574652070726f6f66205f8201527f7365747300000000000000000000000000000000000000000000000000000000602082015250565b5f616c026024836159fb565b9150616c0d82616ba8565b604082019050919050565b5f6020820190508181035f830152616c2f81616bf6565b9050919050565b5f606082019050616c495f830187614f15565b616c566020830186614f15565b8181036040830152616c69818486615cdc565b905095945050505050565b7f4f6e6c79207468652070726f706f736564206f776e65722063616e20636c61695f8201527f6d206f776e657273686970000000000000000000000000000000000000000000602082015250565b5f616cce602b836159fb565b9150616cd982616c74565b604082019050919050565b5f6020820190508181035f830152616cfb81616cc2565b9050919050565b7f7072656d61747572652070726f6f6600000000000000000000000000000000005f82015250565b5f616d36600f836159fb565b9150616d4182616d02565b602082019050919050565b5f6020820190508181035f830152616d6381616d2a565b9050919050565b7f656d7074792070726f6f660000000000000000000000000000000000000000005f82015250565b5f616d9e600b836159fb565b9150616da982616d6a565b602082019050919050565b5f6020820190508181035f830152616dcb81616d92565b9050919050565b7f6e6f206368616c6c656e6765207363686564756c6564000000000000000000005f82015250565b5f616e066016836159fb565b9150616e1182616dd2565b602082019050919050565b5f6020820190508181035f830152616e3381616dfa565b9050919050565b5f8160c01b9050919050565b5f616e5082616e3a565b9050919050565b616e68616e638261579d565b616e46565b82525050565b5f616e798286616442565b602082019150616e898285616442565b602082019150616e998284616e57565b600882019150819050949350505050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601260045260245ffd5b5f616ee182614f0c565b9150616eec83614f0c565b925082616efc57616efb616eaa565b5b828206905092915050565b5f82356001604003833603038112616f2257616f21615e7e565b5b80830191505092915050565b5f8083356001602003843603038112616f4a57616f49615e7e565b5b80840192508235915067ffffffffffffffff821115616f6c57616f6b615e82565b5b602083019250602082023603831315616f8857616f87615e86565b5b509250929050565b7f70726f6f6620646964206e6f74207665726966790000000000000000000000005f82015250565b5f616fc46014836159fb565b9150616fcf82616f90565b602082019050919050565b5f6020820190508181035f830152616ff181616fb8565b9050919050565b5f60808201905061700b5f830187614f15565b6170186020830186614f15565b6170256040830185614f15565b6170326060830184614f15565b95945050505050565b5f819050919050565b5f61705e6170596170548461703b565b615801565b61579d565b9050919050565b61706e81617044565b82525050565b5f6020820190506170875f830184617065565b92915050565b7f4c65616620696e646578206f7574206f6620626f756e647300000000000000005f82015250565b5f6170c16018836159fb565b91506170cc8261708d565b602082019050919050565b5f6020820190508181035f8301526170ee816170b5565b9050919050565b5f6170ff82614f0c565b91505f820361711157617110615b40565b5b600182039050919050565b5f61712682614f0c565b915061713183614f0c565b92508261714157617140616eaa565b5b828204905092915050565b7f496e636f72726563742066656520616d6f756e740000000000000000000000005f82015250565b5f6171806014836159fb565b915061718b8261714c565b602082019050919050565b5f6020820190508181035f8301526171ad81617174565b9050919050565b50565b5f6171c25f83616473565b91506171cd826171b4565b5f82019050919050565b5f6171e1826171b7565b9150819050919050565b7f4275726e206661696c65640000000000000000000000000000000000000000005f82015250565b5f61721f600b836159fb565b915061722a826171eb565b602082019050919050565b5f6020820190508181035f83015261724c81617213565b9050919050565b7f53697a65206d7573742062652061206d756c7469706c65206f662033320000005f82015250565b5f617287601d836159fb565b915061729282617253565b602082019050919050565b5f6040820190506172b05f830184614f15565b81810360208301526172c18161727b565b905092915050565b7f53697a65206d7573742062652067726561746572207468616e203000000000005f82015250565b5f6172fd601b836159fb565b9150617308826172c9565b602082019050919050565b5f6040820190506173265f830184614f15565b8181036020830152617337816172f1565b905092915050565b7f526f6f742073697a65206d757374206265206c657373207468616e20325e35305f82015250565b5f6173736020836159fb565b915061737e8261733f565b602082019050919050565b5f60408201905061739c5f830184614f15565b81810360208301526173ad81617367565b905092915050565b5f6173bf82614f0c565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036173f1576173f0615b40565b5b600182019050919050565b5f808335600160200384360303811261741857617417615e7e565b5b80840192508235915067ffffffffffffffff82111561743a57617439615e82565b5b60208301925060018202360383131561745657617455615e86565b5b509250929050565b5f82905092915050565b5f819050815f5260205f209050919050565b5f6020601f8301049050919050565b5f82821b905092915050565b5f600883026174c47fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82617489565b6174ce8683617489565b95508019841693508086168417925050509392505050565b5f6175006174fb6174f684614f0c565b615801565b614f0c565b9050919050565b5f819050919050565b617519836174e6565b61752d61752582617507565b848454617495565b825550505050565b5f90565b617541617535565b61754c818484617510565b505050565b5b8181101561756f576175645f82617539565b600181019050617552565b5050565b601f8211156175b45761758581617468565b61758e8461747a565b8101602085101561759d578190505b6175b16175a98561747a565b830182617551565b50505b505050565b5f82821c905092915050565b5f6175d45f19846008026175b9565b1980831691505092915050565b5f6175ec83836175c5565b9150826002028217905092915050565b617606838361745e565b67ffffffffffffffff81111561761f5761761e61561f565b5b6176298254616409565b617634828285617573565b5f601f831160018114617661575f841561764f578287013590505b61765985826175e1565b8655506176c0565b601f19841661766f86617468565b5f5b8281101561769657848901358255600182019150602085019450602081019050617671565b868310156176b357848901356176af601f8916826175c5565b8355505b6001600288020188555050505b50505050505050565b6176d48383836175fc565b505050565b5f81015f83016176e981856173fc565b6176f48183866176c9565b505050505050565b61770682826176d9565b5050565b7f6661696c656420746f2076616c69646174653a20657374696d617465642067615f8201527f7320666565206d7573742062652067726561746572207468616e203000000000602082015250565b5f617764603c836159fb565b915061776f8261770a565b604082019050919050565b5f6020820190508181035f83015261779181617758565b9050919050565b7f6661696c656420746f2076616c69646174653a204174746f46494c20707269635f8201527f65206d7573742062652067726561746572207468616e20300000000000000000602082015250565b5f6177f26038836159fb565b91506177fd82617798565b604082019050919050565b5f6020820190508181035f83015261781f816177e6565b9050919050565b7f6661696c656420746f2076616c69646174653a207261772073697a65206d75735f8201527f742062652067726561746572207468616e203000000000000000000000000000602082015250565b5f6178806033836159fb565b915061788b82617826565b604082019050919050565b5f6020820190508181035f8301526178ad81617874565b9050919050565b5f8160011c9050919050565b5f808291508390505b6001851115617909578086048111156178e5576178e4615b40565b5b60018516156178f45780820291505b8081029050617902856178b4565b94506178c9565b94509492505050565b5f8261792157600190506179dc565b8161792e575f90506179dc565b8160018114617944576002811461794e5761797d565b60019150506179dc565b60ff8411156179605761795f615b40565b5b8360020a91508482111561797757617976615b40565b5b506179dc565b5060208310610133831016604e8410600b84101617156179b25782820a9050838111156179ad576179ac615b40565b5b6179dc565b6179bf84848460016178c0565b925090508184048111156179d6576179d5615b40565b5b81810290505b9392505050565b5f63ffffffff82169050919050565b5f6179fc82614f0c565b9150617a07836179e3565b9250617a347fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8484617912565b905092915050565b5f617a46826157bf565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffff800000008203617a7857617a77615b40565b5b815f039050919050565b617a8b81615354565b8114617a95575f80fd5b50565b5f81519050617aa681617a82565b92915050565b5f60208284031215617ac157617ac0614f46565b5b5f617ace84828501617a98565b91505092915050565b7f436964206461746120697320746f6f2073686f727400000000000000000000005f82015250565b5f617b0b6015836159fb565b9150617b1682617ad7565b602082019050919050565b5f6020820190508181035f830152617b3881617aff565b9050919050565b5f819050602082019050919050565b5f617b598251615354565b80915050919050565b5f617b6c8261547f565b82617b7684617b3f565b9050617b8181617b4e565b92506020821015617bc157617bbc7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff83602003600802617489565b831692505b5050919050565b5f606082019050617bdb5f830186614f15565b617be860208301856157b0565b617bf560408301846157cb565b949350505050565b7f496e7075742065786365656473206d6178696d756d20696e743235362076616c5f8201527f7565000000000000000000000000000000000000000000000000000000000000602082015250565b5f617c576022836159fb565b9150617c6282617bfd565b604082019050919050565b5f6020820190508181035f830152617c8481617c4b565b9050919050565b5f819050919050565b5f617c9e82617c8b565b91507f80000000000000000000000000000000000000000000000000000000000000008203617cd057617ccf615b40565b5b815f03905091905056fea264697066735822122033620393606d94f1c8d591222f06f6214eafe22531657caf6478a039d4cfa51864736f6c63430008170033","sourceMap":"1708:31540:17:-:0;;;1171:4:2;1128:48;;;;;;;;;7213:50:17;;;;;;;;;;7234:22;:20;;;:22;;:::i;:::-;1708:31540;;7711:422:1;7826:30;7859:26;:24;;;:26;;:::i;:::-;7826:59;;7900:1;:15;;;;;;;;;;;;7896:76;;;7938:23;;;;;;;;;;;;;;7896:76;8003:16;7985:34;;:1;:14;;;;;;;;;;;;:34;;;7981:146;;8052:16;8035:1;:14;;;:33;;;;;;;;;;;;;;;;;;8087:29;8099:16;8087:29;;;;;;:::i;:::-;;;;;;;;7981:146;7760:373;7711:422::o;8737:170::-;8795:30;8870:21;8860:31;;8737:170;:::o;7:101:20:-;43:7;83:18;76:5;72:30;61:41;;7:101;;;:::o;114:115::-;199:23;216:5;199:23;:::i;:::-;194:3;187:36;114:115;;:::o;235:218::-;326:4;364:2;353:9;349:18;341:26;;377:69;443:1;432:9;428:17;419:6;377:69;:::i;:::-;235:218;;;;:::o;1708:31540:17:-;;;;;;;;;;;;;;;;;;;;;;;","linkReferences":{}},"deployedBytecode":{"object":"0x608060405260043610610271575f3560e01c806367e406d51161014e5780639f8cb3bd116100c0578063f2fde38b11610079578063f2fde38b146109c5578063f58f952b146109ed578063f5cac1ba14610a09578063f83758fe14610a45578063faa6716314610a6f578063fe4b84df14610aab57610271565b80639f8cb3bd146108b9578063ad3cb1cc146108e3578063c0e159491461090d578063d49245c114610937578063ee3dac6514610973578063f178b1be1461099b57610271565b806371cf2a161161011257806371cf2a1614610789578063847d1d06146107c557806389208ba9146107ed5780638da5cb5b146108295780638ea417e5146108535780639153e64b1461087d57610271565b806367e406d5146106a95780636ba4608f146106d35780636cb55c161461070f5780636fa4469214610737578063715018a61461077357610271565b80633f84135f116101e757806347331050116101ab57806347331050146105965780634903704a146105d25780634f1ef2861461060e5780634fa279201461062a57806352d1902d1461065557806361a52a361461067f57610271565b80633f84135f1461048f578063453f4f62146104cb57806345c0b92d14610507578063462dd4491461052f5780634726075b1461055957610271565b806315b175701161023957806315b175701461037157806316e2bcd51461039b57806319c75950146103c557806331601226146103ef5780633b68e4e91461042b5780633b7ae9131461045357610271565b8063029b4646146102755780630528a55b1461029f5780630a4d7932146102db5780630a6a63f11461030b57806311c0ee4a14610335575b5f80fd5b348015610280575f80fd5b50610289610ad3565b6040516102969190614f24565b60405180910390f35b3480156102aa575f80fd5b506102c560048036038101906102c09190614fd9565b610ad9565b6040516102d2919061511a565b60405180910390f35b6102f560048036038101906102f091906151e9565b610bc8565b6040516103029190614f24565b60405180910390f35b348015610316575f80fd5b5061031f610ee8565b60405161032c9190615255565b60405180910390f35b348015610340575f80fd5b5061035b600480360381019061035691906152c3565b610f00565b6040516103689190614f24565b60405180910390f35b34801561037c575f80fd5b506103856112a4565b6040516103929190615255565b60405180910390f35b3480156103a6575f80fd5b506103af6112bc565b6040516103bc9190614f24565b60405180910390f35b3480156103d0575f80fd5b506103d96112c7565b6040516103e6919061536c565b60405180910390f35b3480156103fa575f80fd5b5061041560048036038101906104109190615385565b6112ed565b6040516104229190615255565b60405180910390f35b348015610436575f80fd5b50610451600480360381019061044c91906153b0565b61136e565b005b34801561045e575f80fd5b5061047960048036038101906104749190615441565b6116b6565b6040516104869190615530565b60405180910390f35b34801561049a575f80fd5b506104b560048036038101906104b09190615385565b6117c8565b6040516104c29190614f24565b60405180910390f35b3480156104d6575f80fd5b506104f160048036038101906104ec9190615385565b61182a565b6040516104fe9190614f24565b60405180910390f35b348015610512575f80fd5b5061052d60048036038101906105289190615550565b611924565b005b34801561053a575f80fd5b50610543611e04565b6040516105509190614f24565b60405180910390f35b348015610564575f80fd5b5061057f600480360381019061057a9190615385565b611e08565b60405161058d9291906155c1565b60405180910390f35b3480156105a1575f80fd5b506105bc60048036038101906105b79190615441565b611ebd565b6040516105c99190615602565b60405180910390f35b3480156105dd575f80fd5b506105f860048036038101906105f39190615441565b611f16565b6040516106059190614f24565b60405180910390f35b61062860048036038101906106239190615743565b611f7a565b005b348015610635575f80fd5b5061063e611f99565b60405161064c9291906157da565b60405180910390f35b348015610660575f80fd5b506106696120ab565b604051610676919061536c565b60405180910390f35b34801561068a575f80fd5b506106936120dc565b6040516106a09190614f24565b60405180910390f35b3480156106b4575f80fd5b506106bd6120e3565b6040516106ca919061585c565b60405180910390f35b3480156106de575f80fd5b506106f960048036038101906106f49190615385565b6120fb565b6040516107069190614f24565b60405180910390f35b34801561071a575f80fd5b5061073560048036038101906107309190615875565b61215d565b005b348015610742575f80fd5b5061075d60048036038101906107589190615385565b612307565b60405161076a919061595b565b60405180910390f35b34801561077e575f80fd5b50610787612416565b005b348015610794575f80fd5b506107af60048036038101906107aa9190615441565b612429565b6040516107bc9190615602565b60405180910390f35b3480156107d0575f80fd5b506107eb60048036038101906107e6919061597b565b612517565b005b3480156107f8575f80fd5b50610813600480360381019061080e9190615385565b612817565b6040516108209190614f24565b60405180910390f35b348015610834575f80fd5b5061083d612879565b60405161084a9190615255565b60405180910390f35b34801561085e575f80fd5b506108676128ae565b60405161087491906159d8565b60405180910390f35b348015610888575f80fd5b506108a3600480360381019061089e9190615441565b6128ca565b6040516108b09190614f24565b60405180910390f35b3480156108c4575f80fd5b506108cd61293c565b6040516108da9190614f24565b60405180910390f35b3480156108ee575f80fd5b506108f7612942565b6040516109049190615a43565b60405180910390f35b348015610918575f80fd5b5061092161297b565b60405161092e9190614f24565b60405180910390f35b348015610942575f80fd5b5061095d60048036038101906109589190615385565b612980565b60405161096a9190614f24565b60405180910390f35b34801561097e575f80fd5b5061099960048036038101906109949190615385565b6129e2565b005b3480156109a6575f80fd5b506109af612bdd565b6040516109bc9190614f24565b60405180910390f35b3480156109d0575f80fd5b506109eb60048036038101906109e69190615a63565b612be1565b005b610a076004803603810190610a029190615ae3565b612c65565b005b348015610a14575f80fd5b50610a2f6004803603810190610a2a9190615385565b613215565b604051610a3c9190615602565b60405180910390f35b348015610a50575f80fd5b50610a596132a7565b604051610a669190614f24565b60405180910390f35b348015610a7a575f80fd5b50610a956004803603810190610a909190615385565b6132af565b604051610aa29190614f24565b60405180910390f35b348015610ab6575f80fd5b50610ad16004803603810190610acc9190615385565b613311565b005b61080081565b60605f610af660055f8781526020019081526020015f20546134a0565b610100610b039190615b6d565b90505f8484905067ffffffffffffffff811115610b2357610b2261561f565b5b604051908082528060200260200182016040528015610b5c57816020015b610b49614e89565b815260200190600190039081610b415790505b5090505f5b85859050811015610bbb57610b9087878784818110610b8357610b82615ba0565b5b90506020020135856135c9565b828281518110610ba357610ba2615ba0565b5b60200260200101819052508080600101915050610b61565b5080925050509392505050565b5f610800838390501115610c11576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c0890615c17565b60405180910390fd5b5f610c1a6137d4565b905080341015610c5f576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c5690615c7f565b60405180910390fd5b610c68816137fb565b80341115610cc0573373ffffffffffffffffffffffffffffffffffffffff166108fc8234610c969190615b6d565b90811502906040515f60405180830381858888f19350505050158015610cbe573d5f803e3d5ffd5b505b5f60015f81819054906101000a900467ffffffffffffffff1680929190610ce690615c9d565b91906101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555067ffffffffffffffff1690505f60065f8381526020019081526020015f20819055505f60075f8381526020019081526020015f208190555033600b5f8381526020019081526020015f205f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508560085f8381526020019081526020015f205f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505f600d5f8381526020019081526020015f20819055505f73ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff1614610e98578573ffffffffffffffffffffffffffffffffffffffff166394d41b36823388886040518563ffffffff1660e01b8152600401610e6a9493929190615d08565b5f604051808303815f87803b158015610e81575f80fd5b505af1158015610e93573d5f803e3d5ffd5b505050505b3373ffffffffffffffffffffffffffffffffffffffff16817f017f0b33d96e8f9968590172013032c2346cf047787a5e17a44b0a1bb3cd0f0160405160405180910390a380925050509392505050565b73ff0000000000000000000000000000000000006381565b5f610800838390501115610f49576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610f4090615c17565b60405180910390fd5b610f5286613215565b610f91576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610f8890615d90565b60405180910390fd5b5f8585905011610fd6576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610fcd90615df8565b60405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff16600b5f8881526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614611074576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161106b90615e60565b60405180910390fd5b5f60055f8881526020019081526020015f205490505f8686905067ffffffffffffffff8111156110a7576110a661561f565b5b6040519080825280602002602001820160405280156110d55781602001602082028036833780820191505090505b5090505f5b878790508110156111845761114b89828a8a858181106110fd576110fc615ba0565b5b905060200281019061110f9190615e8a565b805f019061111d9190615eb1565b8b8b868181106111305761112f615ba0565b5b90506020028101906111429190615e8a565b602001356138fe565b5080836111589190615ed8565b82828151811061116b5761116a615ba0565b5b60200260200101818152505080806001019150506110da565b50877f5ce51a8003915c377679ba533d9dafa0792058b254965697e674272f13f4fdd3826040516111b5919061595b565b60405180910390a25f60085f8a81526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614611295578073ffffffffffffffffffffffffffffffffffffffff166312d5d66f8a858b8b8b8b6040518763ffffffff1660e01b81526004016112679695949392919061612e565b5f604051808303815f87803b15801561127e575f80fd5b505af1158015611290573d5f803e3d5ffd5b505050505b82935050505095945050505050565b73fe0000000000000000000000000000000000000681565b660400000000000081565b7f150ac9b959aee0051e4091f0ef5216d941f590e1c5e7f91cf7635b5c11628c0e5f1b81565b5f6112f782613215565b611336576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161132d90615d90565b60405180910390fd5b60085f8381526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050919050565b6108008282905011156113b6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016113ad90615c17565b60405180910390fd5b6113bf85613215565b6113fe576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016113f590615d90565b60405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff16600b5f8781526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161461149c576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611493906161f3565b60405180910390fd5b6107d0600a5f8781526020019081526020015f2080549050858590506114c29190615ed8565b1115611503576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016114fa90616281565b60405180910390fd5b5f5b848490508110156115d75760055f8781526020019081526020015f205485858381811061153557611534615ba0565b5b905060200201351061157c576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016115739061630f565b60405180910390fd5b600a5f8781526020019081526020015f208585838181106115a05761159f615ba0565b5b90506020020135908060018154018082558091505060019003905f5260205f20015f90919091909150558080600101915050611505565b505f60085f8781526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16146116ae578073ffffffffffffffffffffffffffffffffffffffff16634af7d1d287878787876040518663ffffffff1660e01b8152600401611680959493929190616395565b5f604051808303815f87803b158015611697575f80fd5b505af11580156116a9573d5f803e3d5ffd5b505050505b505050505050565b6116be614ea1565b6116c783613215565b611706576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016116fd90615d90565b60405180910390fd5b60025f8481526020019081526020015f205f8381526020019081526020015f206040518060200160405290815f8201805461174090616409565b80601f016020809104026020016040519081016040528092919081815260200182805461176c90616409565b80156117b75780601f1061178e576101008083540402835291602001916117b7565b820191905f5260205f20905b81548152906001019060200180831161179a57829003601f168201915b505050505081525050905092915050565b5f6117d282613215565b611811576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161180890615d90565b60405180910390fd5b60065f8381526020019081526020015f20549050919050565b5f805f73fe0000000000000000000000000000000000000673ffffffffffffffffffffffffffffffffffffffff16846040516020016118699190616459565b60405160208183030381529060405260405161188591906164ad565b5f60405180830381855afa9150503d805f81146118bd576040519150601f19603f3d011682016040523d82523d5f602084013e6118c2565b606091505b509150915081611907576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016118fe90616533565b60405180910390fd5b8080602001905181019061191b9190616565565b92505050919050565b61080082829050111561196c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161196390615c17565b60405180910390fd5b600b5f8581526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614611a0a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611a0190616600565b60405180910390fd5b5f60065f8681526020019081526020015f205411611a5d576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611a549061668e565b60405180910390fd5b5f600d5f8681526020019081526020015f205403611a8c5743600d5f8681526020019081526020015f20819055505b5f600a5f8681526020019081526020015f2090505f818054905067ffffffffffffffff811115611abf57611abe61561f565b5b604051908082528060200260200182016040528015611aed5781602001602082028036833780820191505090505b5090505f5b8151811015611b77578260018480549050611b0d9190615b6d565b81548110611b1e57611b1d615ba0565b5b905f5260205f200154828281518110611b3a57611b39615ba0565b5b60200260200101818152505082805480611b5757611b566166ac565b5b600190038181905f5260205f20015f905590558080600101915050611af2565b50611b828682613aab565b857fd22bb0ee05b8ca92312459c76223d3b9bc1bd96fb6c9b18e637ededf92d8117482604051611bb2919061595b565b60405180910390a260065f8781526020019081526020015f205460095f8881526020019081526020015f20819055505f5443611bee9190615ed8565b851015611c30576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611c279061676f565b60405180910390fd5b8460075f8881526020019081526020015f20819055505f60065f8881526020019081526020015f205403611cb857857f323c29bc8d678a5d987b90a321982d10b9a91bcad071a9e445879497bf0e68e760405160405180910390a25f600d5f8881526020019081526020015f20819055505f60075f8881526020019081526020015f20819055505b5f60085f8881526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614611db0578073ffffffffffffffffffffffffffffffffffffffff1663aa27ebcc8860075f8b81526020019081526020015f205460065f8c81526020019081526020015f205489896040518663ffffffff1660e01b8152600401611d8295949392919061678d565b5f604051808303815f87803b158015611d99575f80fd5b505af1158015611dab573d5f803e3d5ffd5b505050505b867fc099ffec4e3e773644a4d1dda368c46af853a0eeb15babde217f53a657396e1e8760065f8b81526020019081526020015f2054604051611df39291906167d9565b60405180910390a250505050505050565b5f81565b5f80611e1383613215565b611e52576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611e4990615d90565b60405180910390fd5b600b5f8481526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600c5f8581526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1691509150915091565b5f611ec783613215565b8015611ee3575060055f8481526020019081526020015f205482105b8015611f0e57505f60035f8581526020019081526020015f205f8481526020019081526020015f2054115b905092915050565b5f8060095f8581526020019081526020015f20546020611f369190616800565b90505f80611f42611f99565b91509150611f6f85838386600d5f8c81526020019081526020015f205443611f6a9190615b6d565b613b6a565b935050505092915050565b611f82613de7565b611f8b82613ecd565b611f958282613ed8565b5050565b5f805f73a2aa501b19aff244d90cc15a4cf739d2725b572973ffffffffffffffffffffffffffffffffffffffff1663a4ae35e07f150ac9b959aee0051e4091f0ef5216d941f590e1c5e7f91cf7635b5c11628c0e5f1b620151806040518363ffffffff1660e01b8152600401612010929190616841565b608060405180830381865afa15801561202b573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061204f919061696b565b90505f815f015160070b13612099576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161209090616a06565b60405180910390fd5b805f0151816040015192509250509091565b5f6120b4613ff6565b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5f1b905090565b6201518081565b73a2aa501b19aff244d90cc15a4cf739d2725b572981565b5f61210582613215565b612144576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161213b90615d90565b60405180910390fd5b60075f8381526020019081526020015f20549050919050565b61216682613215565b6121a5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161219c90615d90565b60405180910390fd5b5f600b5f8481526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690503373ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614612247576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161223e90616a94565b60405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16036122b257600c5f8481526020019081526020015f205f6101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055612302565b81600c5f8581526020019081526020015f205f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b505050565b606061231282613215565b612351576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161234890615d90565b60405180910390fd5b5f600a5f8481526020019081526020015f2090505f818054905067ffffffffffffffff8111156123845761238361561f565b5b6040519080825280602002602001820160405280156123b25781602001602082028036833780820191505090505b5090505f5b828054905081101561240b578281815481106123d6576123d5615ba0565b5b905f5260205f2001548282815181106123f2576123f1615ba0565b5b60200260200101818152505080806001019150506123b7565b508092505050919050565b61241e61407d565b6124275f614104565b565b5f8061244560055f8681526020019081526020015f20546134a0565b6101006124529190615b6d565b90505f61247d85600160095f8981526020019081526020015f20546124779190615b6d565b846135c9565b9050600160035f8781526020019081526020015f205f835f015181526020019081526020015f20546124af9190615b6d565b8160200151146124f4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016124eb90616b22565b60405180910390fd5b6124fe8585611ebd565b801561250d5750805f01518411155b9250505092915050565b61080082829050111561255f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161255690615c17565b60405180910390fd5b60015f9054906101000a900467ffffffffffffffff1667ffffffffffffffff1683106125c0576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016125b790616b8a565b60405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff16600b5f8581526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161461265e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161265590616c18565b60405180910390fd5b5f60065f8581526020019081526020015f205490505f60065f8681526020019081526020015f20819055505f600b5f8681526020019081526020015f205f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505f60075f8681526020019081526020015f20819055505f600d5f8681526020019081526020015f20819055505f60085f8681526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16146127d8578073ffffffffffffffffffffffffffffffffffffffff166326c249e3868487876040518563ffffffff1660e01b81526004016127aa9493929190616c36565b5f604051808303815f87803b1580156127c1575f80fd5b505af11580156127d3573d5f803e3d5ffd5b505050505b847f589e9a441b5bddda77c4ab647b0108764a9cc1a7f655aa9b7bc50b8bdfab8673836040516128089190614f24565b60405180910390a25050505050565b5f61282182613215565b612860576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161285790615d90565b60405180910390fd5b60095f8381526020019081526020015f20549050919050565b5f806128836141d5565b9050805f015f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1691505090565b5f60015f9054906101000a900467ffffffffffffffff16905090565b5f6128d483613215565b612913576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161290a90615d90565b60405180910390fd5b60035f8481526020019081526020015f205f8381526020019081526020015f2054905092915050565b6107d081565b6040518060400160405280600581526020017f352e302e3000000000000000000000000000000000000000000000000000000081525081565b602081565b5f61298a82613215565b6129c9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016129c090615d90565b60405180910390fd5b60055f8381526020019081526020015f20549050919050565b6129eb81613215565b612a2a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612a2190615d90565b60405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff16600c5f8381526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614612ac8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612abf90616ce4565b60405180910390fd5b5f600b5f8381526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905033600b5f8481526020019081526020015f205f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550600c5f8381526020019081526020015f205f6101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690553373ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16837fd3273037b635678293ef0c076bd77af13760e75e12806d1db237616d03c3a76660405160405180910390a45050565b5f81565b612be961407d565b5f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603612c59575f6040517f1e4fbdf7000000000000000000000000000000000000000000000000000000008152600401612c509190615255565b60405180910390fd5b612c6281614104565b50565b5f5a9050600b5f8581526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614612d07576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612cfe90616600565b60405180910390fd5b5f60075f8681526020019081526020015f2054905080431015612d5f576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612d5690616d4c565b60405180910390fd5b5f8484905011612da4576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612d9b90616db4565b60405180910390fd5b5f8103612de6576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401612ddd90616e1c565b60405180910390fd5b5f8484905067ffffffffffffffff811115612e0457612e0361561f565b5b604051908082528060200260200182016040528015612e3d57816020015b612e2a614e89565b815260200190600190039081612e225790505b5090505f612e4a876141fc565b90505f60095f8981526020019081526020015f205490505f612e7c60055f8b81526020019081526020015f20546134a0565b610100612e899190615b6d565b90505f5b888890508167ffffffffffffffff16101561308b575f848b83604051602001612eb893929190616e6e565b60405160208183030381529060405290505f8482805190602001205f1c612edf9190616ed7565b9050612eec8c82866135c9565b878467ffffffffffffffff1681518110612f0957612f08615ba0565b5b60200260200101819052505f612f4e612f498e8a8767ffffffffffffffff1681518110612f3957612f38615ba0565b5b60200260200101515f01516116b6565b61421e565b90505f6130328d8d8767ffffffffffffffff16818110612f7157612f70615ba0565b5b9050602002810190612f839190616f07565b8060200190612f929190616f2e565b808060200260200160405190810160405280939291908181526020018383602002808284375f81840152601f19601f82011690508083019250505050505050838f8f8967ffffffffffffffff16818110612fef57612fee615ba0565b5b90506020028101906130019190616f07565b5f01358c8967ffffffffffffffff168151811061302157613020615ba0565b5b602002602001015160200151614360565b905080613074576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161306b90616fda565b60405180910390fd5b50505050808061308390615c9d565b915050612e8d565b505f610514602061309c8b8b614378565b6130a69190615ed8565b6130b09190616800565b5a886130bc9190615b6d565b6130c69190615ed8565b90506130d28a826143fd565b5f60085f8c81526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16146131ba578073ffffffffffffffffffffffffffffffffffffffff1663356de02b8c60065f8f81526020019081526020015f2054888e8e90506040518563ffffffff1660e01b815260040161318c9493929190616ff8565b5f604051808303815f87803b1580156131a3575f80fd5b505af11580156131b5573d5f803e3d5ffd5b505050505b43600d5f8d81526020019081526020015f20819055508a7f1acf7df9f0c1b0208c23be6178950c0273f89b766805a2c0bd1e53d25c700e5087604051613200919061511a565b60405180910390a25050505050505050505050565b5f60015f9054906101000a900467ffffffffffffffff1667ffffffffffffffff16821080156132a057505f73ffffffffffffffffffffffffffffffffffffffff16600b5f8481526020019081526020015f205f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614155b9050919050565b5f8054905090565b5f6132b982613215565b6132f8576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016132ef90615d90565b60405180910390fd5b600d5f8381526020019081526020015f20549050919050565b5f61331a61450d565b90505f815f0160089054906101000a900460ff161590505f825f015f9054906101000a900467ffffffffffffffff1690505f808267ffffffffffffffff161480156133625750825b90505f60018367ffffffffffffffff1614801561339557505f3073ffffffffffffffffffffffffffffffffffffffff163b145b9050811580156133a3575080155b156133da576040517ff92ee8a900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001855f015f6101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055508315613427576001855f0160086101000a81548160ff0219169083151502179055505b61343033614534565b613438614548565b855f819055508315613498575f855f0160086101000a81548160ff0219169083151502179055507fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2600160405161348f9190617074565b60405180910390a15b505050505050565b5f8061010090505f608084901c90505f81146134c9576080826134c39190615b6d565b91508093505b604084901c90505f81146134ea576040826134e49190615b6d565b91508093505b602084901c90505f811461350b576020826135059190615b6d565b91508093505b601084901c90505f811461352c576010826135269190615b6d565b91508093505b600884901c90505f811461354d576008826135479190615b6d565b91508093505b600484901c90505f811461356e576004826135689190615b6d565b91508093505b600284901c90505f811461358f576002826135899190615b6d565b91508093505b600184901c90505f81146135b3576002826135aa9190615b6d565b925050506135c4565b83826135bf9190615b6d565b925050505b919050565b6135d1614e89565b60065f8581526020019081526020015f20548310613624576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161361b906170d7565b60405180910390fd5b5f6001836001901b6136369190615b6d565b90505f80808590505b5f81111561373b5760055f8981526020019081526020015f205484106136825760018161366c9190615b6d565b6001901b8461367b9190615b6d565b9350613728565b60045f8981526020019081526020015f205f8581526020019081526020015f2054836136ae9190615ed8565b91508682116137085760045f8981526020019081526020015f205f8581526020019081526020015f2054836136e39190615ed8565b92506001816136f29190615b6d565b6001901b846137019190615ed8565b9350613727565b6001816137159190615b6d565b6001901b846137249190615b6d565b93505b5b8080613733906170f5565b91505061363f565b5060045f8881526020019081526020015f205f8481526020019081526020015f2054826137689190615ed8565b90508581116137a75760405180604001604052806001856137899190615ed8565b8152602001828861379a9190615b6d565b81525093505050506137cd565b604051806040016040528084815260200183886137c49190615b6d565b81525093505050505b9392505050565b5f600a6001670de0b6b3a76400006137ec9190616800565b6137f6919061711c565b905090565b8034101561383e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161383590617196565b60405180910390fd5b5f73ff0000000000000000000000000000000000006373ffffffffffffffffffffffffffffffffffffffff1682604051613877906171d7565b5f6040518083038185875af1925050503d805f81146138b1576040519150601f19603f3d011682016040523d82523d5f602084013e6138b6565b606091505b50509050806138fa576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016138f190617235565b60405180910390fd5b5050565b5f8060208361390d9190616ed7565b1461394f57836040517fc7b67cf3000000000000000000000000000000000000000000000000000000008152600401613946919061729d565b60405180910390fd5b5f820361399357836040517fc7b67cf300000000000000000000000000000000000000000000000000000000815260040161398a9190617313565b60405180910390fd5b66040000000000008211156139df57836040517fc7b67cf30000000000000000000000000000000000000000000000000000000081526004016139d69190617389565b60405180910390fd5b5f6020836139ed919061711c565b90505f60055f8881526020019081526020015f205f815480929190613a11906173b5565b919050559050613a22878383614552565b8460025f8981526020019081526020015f205f8381526020019081526020015f208181613a4f91906176fc565b9050508160035f8981526020019081526020015f205f8381526020019081526020015f20819055508160065f8981526020019081526020015f205f828254613a979190615ed8565b925050819055508092505050949350505050565b613ab482613215565b613af3576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401613aea90615d90565b60405180910390fd5b5f805b8251811015613b3d57613b2384848381518110613b1657613b15615ba0565b5b60200260200101516145ed565b82613b2e9190615ed8565b91508080600101915050613af6565b508060065f8581526020019081526020015f205f828254613b5e9190615b6d565b92505081905550505050565b5f80861180613b7857505f48145b613bb7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401613bae9061777a565b60405180910390fd5b5f8567ffffffffffffffff1611613c03576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401613bfa90617808565b60405180910390fd5b5f8311613c45576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401613c3c90617896565b60405180910390fd5b5f808560030b12613cc35784600a613c5d91906179f2565b8667ffffffffffffffff166201518065010000000000613c7d9190616800565b613c879190616800565b613c919190616800565b6001670de0b6b3a7640000613ca69190616800565b6002613cb29190616800565b613cbc919061711c565b9050613d3b565b8567ffffffffffffffff166201518065010000000000613ce39190616800565b613ced9190616800565b85613cf790617a3c565b600a613d0391906179f2565b6001670de0b6b3a7640000613d189190616800565b6002613d249190616800565b613d2e9190616800565b613d38919061711c565b90505b5f848483613d499190616800565b613d539190616800565b90505f6064600583613d659190616800565b613d6f919061711c565b90505f6064600484613d819190616800565b613d8b919061711c565b9050818a10613da0575f945050505050613dde565b808a10613dbe578982613db39190615b6d565b945050505050613dde565b6064600184613dcd9190616800565b613dd7919061711c565b9450505050505b95945050505050565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff161480613e9457507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16613e7b61467b565b73ffffffffffffffffffffffffffffffffffffffff1614155b15613ecb576040517fe07c8dba00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b613ed561407d565b50565b8173ffffffffffffffffffffffffffffffffffffffff166352d1902d6040518163ffffffff1660e01b8152600401602060405180830381865afa925050508015613f4057506040513d601f19601f82011682018060405250810190613f3d9190617aac565b60015b613f8157816040517f4c9c8ce3000000000000000000000000000000000000000000000000000000008152600401613f789190615255565b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5f1b8114613fe757806040517faa1d49a4000000000000000000000000000000000000000000000000000000008152600401613fde919061536c565b60405180910390fd5b613ff183836146ce565b505050565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff161461407b576040517fe07c8dba00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b614085614740565b73ffffffffffffffffffffffffffffffffffffffff166140a3612879565b73ffffffffffffffffffffffffffffffffffffffff1614614102576140c6614740565b6040517f118cdaa70000000000000000000000000000000000000000000000000000000081526004016140f99190615255565b60405180910390fd5b565b5f61410d6141d5565b90505f815f015f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905082825f015f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508273ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3505050565b5f7f9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300905090565b5f61421760075f8481526020019081526020015f205461182a565b9050919050565b5f6020825f0151511015614267576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161425e90617b21565b60405180910390fd5b5f602067ffffffffffffffff8111156142835761428261561f565b5b6040519080825280601f01601f1916602001820160405280156142b55781602001600182028036833780820191505090505b5090505f5b602081101561434d57835f0151816020865f0151516142d99190615b6d565b6142e39190615ed8565b815181106142f4576142f3615ba0565b5b602001015160f81c60f81b82828151811061431257614311615ba0565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a90535080806001019150506142ba565b508061435890617b62565b915050919050565b5f8361436d868585614747565b149050949350505050565b5f805f90505f5b848490508110156143f257602085858381811061439f5761439e615ba0565b5b90506020028101906143b19190616f07565b80602001906143c09190616f2e565b90506143cc9190616800565b60406143d89190615ed8565b826143e39190615ed8565b9150808060010191505061437f565b508091505092915050565b5f488261440a9190616800565b90505f60095f8581526020019081526020015f2054602061442b9190616800565b90505f80614437611f99565b915091505f61446585848487600d5f8d81526020019081526020015f2054436144609190615b6d565b613b6a565b9050614470816137fb565b803411156144c8573373ffffffffffffffffffffffffffffffffffffffff166108fc823461449e9190615b6d565b90811502906040515f60405180830381858888f193505050501580156144c6573d5f803e3d5ffd5b505b867f928bbf5188022bf8b9a0e59f5e81e179d0a4c729bdba2856ac971af2063fbf2b8285856040516144fc93929190617bc8565b60405180910390a250505050505050565b5f7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00905090565b61453c6147cf565b6145458161480f565b50565b6145506147cf565b565b5f8190505f61456082614893565b90505f8490505f5b828110156145bf575f816001901b856145819190615b6d565b905060045f8981526020019081526020015f205f8281526020019081526020015f2054836145af9190615ed8565b9250508080600101915050614568565b508060045f8881526020019081526020015f205f8681526020019081526020015f2081905550505050505050565b5f8060035f8581526020019081526020015f205f8481526020019081526020015f2054905061461d8484836148b0565b60035f8581526020019081526020015f205f8481526020019081526020015f205f905560025f8581526020019081526020015f205f8481526020019081526020015f205f8082015f61466f9190614eb4565b50508091505092915050565b5f6146a77f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5f1b61496b565b5f015f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b6146d782614974565b8173ffffffffffffffffffffffffffffffffffffffff167fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b60405160405180910390a25f815111156147335761472d8282614a3d565b5061473c565b61473b614abd565b5b5050565b5f33905090565b5f808390505f5b85518110156147c3575f86828151811061476b5761476a615ba0565b5b602002602001015190505f6002866147839190616ed7565b03614799576147928382614af9565b92506147a6565b6147a38184614af9565b92505b6002856147b3919061711c565b945050808060010191505061474e565b50809150509392505050565b6147d7614b0c565b61480d576040517fd7e6bcf800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b6148176147cf565b5f73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603614887575f6040517f1e4fbdf700000000000000000000000000000000000000000000000000000000815260040161487e9190615255565b60405180910390fd5b61489081614104565b50565b5f6148a96001836148a49190615ed8565b614b2a565b9050919050565b5f6148cb60055f8681526020019081526020015f20546134a0565b6101006148d89190615b6d565b90505f6148e484614893565b90505b818111158015614907575060055f8681526020019081526020015f205484105b15614964578260045f8781526020019081526020015f205f8681526020019081526020015f205f82825461493b9190615b6d565b92505081905550806001901b846149529190615ed8565b935061495d84614893565b90506148e7565b5050505050565b5f819050919050565b5f8173ffffffffffffffffffffffffffffffffffffffff163b036149cf57806040517f4c9c8ce30000000000000000000000000000000000000000000000000000000081526004016149c69190615255565b60405180910390fd5b806149fb7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5f1b61496b565b5f015f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b60605f808473ffffffffffffffffffffffffffffffffffffffff1684604051614a6691906164ad565b5f60405180830381855af49150503d805f8114614a9e576040519150601f19603f3d011682016040523d82523d5f602084013e614aa3565b606091505b5091509150614ab3858383614d6f565b9250505092915050565b5f341115614af7576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b5f614b048383614dfc565b905092915050565b5f614b1561450d565b5f0160089054906101000a900460ff16905090565b5f7f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821115614b8e576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401614b8590617c6d565b60405180910390fd5b5f61010090505f83614b9f90617c94565b905083811690505f8114614bbc578180614bb8906170f5565b9250505b5f6fffffffffffffffffffffffffffffffff821614614be557608082614be29190615b6d565b91505b5f77ffffffffffffffff0000000000000000ffffffffffffffff821614614c1657604082614c139190615b6d565b91505b5f7bffffffff00000000ffffffff00000000ffffffff00000000ffffffff821614614c4b57602082614c489190615b6d565b91505b5f7dffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff821614614c8257601082614c7f9190615b6d565b91505b5f7eff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff821614614cba57600882614cb79190615b6d565b91505b5f7f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f821614614cf357600482614cf09190615b6d565b91505b5f7f3333333333333333333333333333333333333333333333333333333333333333821614614d2c57600282614d299190615b6d565b91505b5f7f5555555555555555555555555555555555555555555555555555555555555555821614614d6557600182614d629190615b6d565b91505b8192505050919050565b606082614d8457614d7f82614e45565b614df4565b5f8251148015614daa57505f8473ffffffffffffffffffffffffffffffffffffffff163b145b15614dec57836040517f9996b315000000000000000000000000000000000000000000000000000000008152600401614de39190615255565b60405180910390fd5b819050614df5565b5b9392505050565b5f825f528160205260205f60405f60025afa614e16575f80fd5b5f5190507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3f8116905092915050565b5f81511115614e575780518082602001fd5b6040517fd6bda27500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60405180604001604052805f81526020015f81525090565b6040518060200160405280606081525090565b508054614ec090616409565b5f825580601f10614ed15750614eee565b601f0160209004905f5260205f2090810190614eed9190614ef1565b5b50565b5b80821115614f08575f815f905550600101614ef2565b5090565b5f819050919050565b614f1e81614f0c565b82525050565b5f602082019050614f375f830184614f15565b92915050565b5f604051905090565b5f80fd5b5f80fd5b614f5781614f0c565b8114614f61575f80fd5b50565b5f81359050614f7281614f4e565b92915050565b5f80fd5b5f80fd5b5f80fd5b5f8083601f840112614f9957614f98614f78565b5b8235905067ffffffffffffffff811115614fb657614fb5614f7c565b5b602083019150836020820283011115614fd257614fd1614f80565b5b9250929050565b5f805f60408486031215614ff057614fef614f46565b5b5f614ffd86828701614f64565b935050602084013567ffffffffffffffff81111561501e5761501d614f4a565b5b61502a86828701614f84565b92509250509250925092565b5f81519050919050565b5f82825260208201905092915050565b5f819050602082019050919050565b61506881614f0c565b82525050565b604082015f8201516150825f85018261505f565b506020820151615095602085018261505f565b50505050565b5f6150a6838361506e565b60408301905092915050565b5f602082019050919050565b5f6150c882615036565b6150d28185615040565b93506150dd83615050565b805f5b8381101561510d5781516150f4888261509b565b97506150ff836150b2565b9250506001810190506150e0565b5085935050505092915050565b5f6020820190508181035f83015261513281846150be565b905092915050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6151638261513a565b9050919050565b61517381615159565b811461517d575f80fd5b50565b5f8135905061518e8161516a565b92915050565b5f8083601f8401126151a9576151a8614f78565b5b8235905067ffffffffffffffff8111156151c6576151c5614f7c565b5b6020830191508360018202830111156151e2576151e1614f80565b5b9250929050565b5f805f60408486031215615200576151ff614f46565b5b5f61520d86828701615180565b935050602084013567ffffffffffffffff81111561522e5761522d614f4a565b5b61523a86828701615194565b92509250509250925092565b61524f81615159565b82525050565b5f6020820190506152685f830184615246565b92915050565b5f8083601f84011261528357615282614f78565b5b8235905067ffffffffffffffff8111156152a05761529f614f7c565b5b6020830191508360208202830111156152bc576152bb614f80565b5b9250929050565b5f805f805f606086880312156152dc576152db614f46565b5b5f6152e988828901614f64565b955050602086013567ffffffffffffffff81111561530a57615309614f4a565b5b6153168882890161526e565b9450945050604086013567ffffffffffffffff81111561533957615338614f4a565b5b61534588828901615194565b92509250509295509295909350565b5f819050919050565b61536681615354565b82525050565b5f60208201905061537f5f83018461535d565b92915050565b5f6020828403121561539a57615399614f46565b5b5f6153a784828501614f64565b91505092915050565b5f805f805f606086880312156153c9576153c8614f46565b5b5f6153d688828901614f64565b955050602086013567ffffffffffffffff8111156153f7576153f6614f4a565b5b61540388828901614f84565b9450945050604086013567ffffffffffffffff81111561542657615425614f4a565b5b61543288828901615194565b92509250509295509295909350565b5f806040838503121561545757615456614f46565b5b5f61546485828601614f64565b925050602061547585828601614f64565b9150509250929050565b5f81519050919050565b5f82825260208201905092915050565b5f5b838110156154b657808201518184015260208101905061549b565b5f8484015250505050565b5f601f19601f8301169050919050565b5f6154db8261547f565b6154e58185615489565b93506154f5818560208601615499565b6154fe816154c1565b840191505092915050565b5f602083015f8301518482035f86015261552382826154d1565b9150508091505092915050565b5f6020820190508181035f8301526155488184615509565b905092915050565b5f805f806060858703121561556857615567614f46565b5b5f61557587828801614f64565b945050602061558687828801614f64565b935050604085013567ffffffffffffffff8111156155a7576155a6614f4a565b5b6155b387828801615194565b925092505092959194509250565b5f6040820190506155d45f830185615246565b6155e16020830184615246565b9392505050565b5f8115159050919050565b6155fc816155e8565b82525050565b5f6020820190506156155f8301846155f3565b92915050565b5f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b615655826154c1565b810181811067ffffffffffffffff821117156156745761567361561f565b5b80604052505050565b5f615686614f3d565b9050615692828261564c565b919050565b5f67ffffffffffffffff8211156156b1576156b061561f565b5b6156ba826154c1565b9050602081019050919050565b828183375f83830152505050565b5f6156e76156e284615697565b61567d565b9050828152602081018484840111156157035761570261561b565b5b61570e8482856156c7565b509392505050565b5f82601f83011261572a57615729614f78565b5b813561573a8482602086016156d5565b91505092915050565b5f806040838503121561575957615758614f46565b5b5f61576685828601615180565b925050602083013567ffffffffffffffff81111561578757615786614f4a565b5b61579385828601615716565b9150509250929050565b5f67ffffffffffffffff82169050919050565b6157b98161579d565b82525050565b5f8160030b9050919050565b6157d4816157bf565b82525050565b5f6040820190506157ed5f8301856157b0565b6157fa60208301846157cb565b9392505050565b5f819050919050565b5f61582461581f61581a8461513a565b615801565b61513a565b9050919050565b5f6158358261580a565b9050919050565b5f6158468261582b565b9050919050565b6158568161583c565b82525050565b5f60208201905061586f5f83018461584d565b92915050565b5f806040838503121561588b5761588a614f46565b5b5f61589885828601614f64565b92505060206158a985828601615180565b9150509250929050565b5f81519050919050565b5f82825260208201905092915050565b5f819050602082019050919050565b5f6158e7838361505f565b60208301905092915050565b5f602082019050919050565b5f615909826158b3565b61591381856158bd565b935061591e836158cd565b805f5b8381101561594e57815161593588826158dc565b9750615940836158f3565b925050600181019050615921565b5085935050505092915050565b5f6020820190508181035f83015261597381846158ff565b905092915050565b5f805f6040848603121561599257615991614f46565b5b5f61599f86828701614f64565b935050602084013567ffffffffffffffff8111156159c0576159bf614f4a565b5b6159cc86828701615194565b92509250509250925092565b5f6020820190506159eb5f8301846157b0565b92915050565b5f81519050919050565b5f82825260208201905092915050565b5f615a15826159f1565b615a1f81856159fb565b9350615a2f818560208601615499565b615a38816154c1565b840191505092915050565b5f6020820190508181035f830152615a5b8184615a0b565b905092915050565b5f60208284031215615a7857615a77614f46565b5b5f615a8584828501615180565b91505092915050565b5f8083601f840112615aa357615aa2614f78565b5b8235905067ffffffffffffffff811115615ac057615abf614f7c565b5b602083019150836020820283011115615adc57615adb614f80565b5b9250929050565b5f805f60408486031215615afa57615af9614f46565b5b5f615b0786828701614f64565b935050602084013567ffffffffffffffff811115615b2857615b27614f4a565b5b615b3486828701615a8e565b92509250509250925092565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f615b7782614f0c565b9150615b8283614f0c565b9250828203905081811115615b9a57615b99615b40565b5b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b7f4578747261206461746120746f6f206c617267650000000000000000000000005f82015250565b5f615c016014836159fb565b9150615c0c82615bcd565b602082019050919050565b5f6020820190508181035f830152615c2e81615bf5565b9050919050565b7f737962696c20666565206e6f74206d65740000000000000000000000000000005f82015250565b5f615c696011836159fb565b9150615c7482615c35565b602082019050919050565b5f6020820190508181035f830152615c9681615c5d565b9050919050565b5f615ca78261579d565b915067ffffffffffffffff8203615cc157615cc0615b40565b5b600182019050919050565b5f82825260208201905092915050565b5f615ce78385615ccc565b9350615cf48385846156c7565b615cfd836154c1565b840190509392505050565b5f606082019050615d1b5f830187614f15565b615d286020830186615246565b8181036040830152615d3b818486615cdc565b905095945050505050565b7f50726f6f6620736574206e6f74206c69766500000000000000000000000000005f82015250565b5f615d7a6012836159fb565b9150615d8582615d46565b602082019050919050565b5f6020820190508181035f830152615da781615d6e565b9050919050565b7f4d75737420616464206174206c65617374206f6e6520726f6f740000000000005f82015250565b5f615de2601a836159fb565b9150615ded82615dae565b602082019050919050565b5f6020820190508181035f830152615e0f81615dd6565b9050919050565b7f4f6e6c7920746865206f776e65722063616e2061646420726f6f7473000000005f82015250565b5f615e4a601c836159fb565b9150615e5582615e16565b602082019050919050565b5f6020820190508181035f830152615e7781615e3e565b9050919050565b5f80fd5b5f80fd5b5f80fd5b5f82356001604003833603038112615ea557615ea4615e7e565b5b80830191505092915050565b5f82356001602003833603038112615ecc57615ecb615e7e565b5b80830191505092915050565b5f615ee282614f0c565b9150615eed83614f0c565b9250828201905080821115615f0557615f04615b40565b5b92915050565b5f82825260208201905092915050565b5f819050919050565b5f80fd5b5f82356001602003833603038112615f4357615f42615f24565b5b82810191505092915050565b5f80fd5b5f80fd5b5f8083356001602003843603038112615f7357615f72615f24565b5b83810192508235915060208301925067ffffffffffffffff821115615f9b57615f9a615f4f565b5b600182023603831315615fb157615fb0615f53565b5b509250929050565b5f615fc48385615489565b9350615fd18385846156c7565b615fda836154c1565b840190509392505050565b5f60208301615ff65f840184615f57565b8583035f870152616008838284615fb9565b925050508091505092915050565b5f6160246020840184614f64565b905092915050565b5f6040830161603d5f840184615f28565b8482035f86015261604e8282615fe5565b91505061605e6020840184616016565b61606b602086018261505f565b508091505092915050565b5f616081838361602c565b905092915050565b5f823560016040038336030381126160a4576160a3615f24565b5b82810191505092915050565b5f602082019050919050565b5f6160c78385615f0b565b9350836020840285016160d984615f1b565b805f5b8781101561611c5784840389526160f38284616089565b6160fd8582616076565b9450616108836160b0565b925060208a019950506001810190506160dc565b50829750879450505050509392505050565b5f6080820190506161415f830189614f15565b61614e6020830188614f15565b81810360408301526161618186886160bc565b90508181036060830152616176818486615cdc565b9050979650505050505050565b7f4f6e6c7920746865206f776e65722063616e207363686564756c652072656d6f5f8201527f76616c206f6620726f6f74730000000000000000000000000000000000000000602082015250565b5f6161dd602c836159fb565b91506161e882616183565b604082019050919050565b5f6020820190508181035f83015261620a816161d1565b9050919050565b7f546f6f206d616e792072656d6f76616c73207761697420666f72206e657874205f8201527f70726f76696e6720706572696f6420746f207363686564756c65000000000000602082015250565b5f61626b603a836159fb565b915061627682616211565b604082019050919050565b5f6020820190508181035f8301526162988161625f565b9050919050565b7f43616e206f6e6c79207363686564756c652072656d6f76616c206f66206578695f8201527f7374696e6720726f6f7473000000000000000000000000000000000000000000602082015250565b5f6162f9602b836159fb565b91506163048261629f565b604082019050919050565b5f6020820190508181035f830152616326816162ed565b9050919050565b5f80fd5b82818337505050565b5f61634583856158bd565b93507f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8311156163785761637761632d565b5b602083029250616389838584616331565b82840190509392505050565b5f6060820190506163a85f830188614f15565b81810360208301526163bb81868861633a565b905081810360408301526163d0818486615cdc565b90509695505050505050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602260045260245ffd5b5f600282049050600182168061642057607f821691505b602082108103616433576164326163dc565b5b50919050565b5f819050919050565b61645361644e82614f0c565b616439565b82525050565b5f6164648284616442565b60208201915081905092915050565b5f81905092915050565b5f6164878261547f565b6164918185616473565b93506164a1818560208601615499565b80840191505092915050565b5f6164b8828461647d565b915081905092915050565b7f52616e646f6d6e65737320707265636f6d70696c652063616c6c206661696c655f8201527f6400000000000000000000000000000000000000000000000000000000000000602082015250565b5f61651d6021836159fb565b9150616528826164c3565b604082019050919050565b5f6020820190508181035f83015261654a81616511565b9050919050565b5f8151905061655f81614f4e565b92915050565b5f6020828403121561657a57616579614f46565b5b5f61658784828501616551565b91505092915050565b7f6f6e6c7920746865206f776e65722063616e206d6f766520746f206e657874205f8201527f70726f76696e6720706572696f64000000000000000000000000000000000000602082015250565b5f6165ea602e836159fb565b91506165f582616590565b604082019050919050565b5f6020820190508181035f830152616617816165de565b9050919050565b7f63616e206f6e6c792073746172742070726f76696e67206f6e6365206c6561765f8201527f6573206172652061646465640000000000000000000000000000000000000000602082015250565b5f616678602c836159fb565b91506166838261661e565b604082019050919050565b5f6020820190508181035f8301526166a58161666c565b9050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603160045260245ffd5b7f6368616c6c656e67652065706f6368206d757374206265206174206c656173745f8201527f206368616c6c656e676546696e616c6974792065706f63687320696e2074686560208201527f2066757475726500000000000000000000000000000000000000000000000000604082015250565b5f6167596047836159fb565b9150616764826166d9565b606082019050919050565b5f6020820190508181035f8301526167868161674d565b9050919050565b5f6080820190506167a05f830188614f15565b6167ad6020830187614f15565b6167ba6040830186614f15565b81810360608301526167cd818486615cdc565b90509695505050505050565b5f6040820190506167ec5f830185614f15565b6167f96020830184614f15565b9392505050565b5f61680a82614f0c565b915061681583614f0c565b925082820261682381614f0c565b9150828204841483151761683a57616839615b40565b5b5092915050565b5f6040820190506168545f83018561535d565b6168616020830184614f15565b9392505050565b5f80fd5b5f8160070b9050919050565b6168818161686c565b811461688b575f80fd5b50565b5f8151905061689c81616878565b92915050565b6168ab8161579d565b81146168b5575f80fd5b50565b5f815190506168c6816168a2565b92915050565b6168d5816157bf565b81146168df575f80fd5b50565b5f815190506168f0816168cc565b92915050565b5f6080828403121561690b5761690a616868565b5b616915608061567d565b90505f6169248482850161688e565b5f830152506020616937848285016168b8565b602083015250604061694b848285016168e2565b604083015250606061695f84828501616551565b60608301525092915050565b5f608082840312156169805761697f614f46565b5b5f61698d848285016168f6565b91505092915050565b7f6661696c656420746f2076616c69646174653a207072696365206d75737420625f8201527f652067726561746572207468616e203000000000000000000000000000000000602082015250565b5f6169f06030836159fb565b91506169fb82616996565b604082019050919050565b5f6020820190508181035f830152616a1d816169e4565b9050919050565b7f4f6e6c79207468652063757272656e74206f776e65722063616e2070726f706f5f8201527f73652061206e6577206f776e6572000000000000000000000000000000000000602082015250565b5f616a7e602e836159fb565b9150616a8982616a24565b604082019050919050565b5f6020820190508181035f830152616aab81616a72565b9050919050565b7f6368616c6c656e676552616e6765202d312073686f756c6420616c69676e20775f8201527f697468207468652076657279206c617374206c656166206f66206120726f6f74602082015250565b5f616b0c6040836159fb565b9150616b1782616ab2565b604082019050919050565b5f6020820190508181035f830152616b3981616b00565b9050919050565b7f70726f6f6620736574206964206f7574206f6620626f756e64730000000000005f82015250565b5f616b74601a836159fb565b9150616b7f82616b40565b602082019050919050565b5f6020820190508181035f830152616ba181616b68565b9050919050565b7f4f6e6c7920746865206f776e65722063616e2064656c6574652070726f6f66205f8201527f7365747300000000000000000000000000000000000000000000000000000000602082015250565b5f616c026024836159fb565b9150616c0d82616ba8565b604082019050919050565b5f6020820190508181035f830152616c2f81616bf6565b9050919050565b5f606082019050616c495f830187614f15565b616c566020830186614f15565b8181036040830152616c69818486615cdc565b905095945050505050565b7f4f6e6c79207468652070726f706f736564206f776e65722063616e20636c61695f8201527f6d206f776e657273686970000000000000000000000000000000000000000000602082015250565b5f616cce602b836159fb565b9150616cd982616c74565b604082019050919050565b5f6020820190508181035f830152616cfb81616cc2565b9050919050565b7f7072656d61747572652070726f6f6600000000000000000000000000000000005f82015250565b5f616d36600f836159fb565b9150616d4182616d02565b602082019050919050565b5f6020820190508181035f830152616d6381616d2a565b9050919050565b7f656d7074792070726f6f660000000000000000000000000000000000000000005f82015250565b5f616d9e600b836159fb565b9150616da982616d6a565b602082019050919050565b5f6020820190508181035f830152616dcb81616d92565b9050919050565b7f6e6f206368616c6c656e6765207363686564756c6564000000000000000000005f82015250565b5f616e066016836159fb565b9150616e1182616dd2565b602082019050919050565b5f6020820190508181035f830152616e3381616dfa565b9050919050565b5f8160c01b9050919050565b5f616e5082616e3a565b9050919050565b616e68616e638261579d565b616e46565b82525050565b5f616e798286616442565b602082019150616e898285616442565b602082019150616e998284616e57565b600882019150819050949350505050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601260045260245ffd5b5f616ee182614f0c565b9150616eec83614f0c565b925082616efc57616efb616eaa565b5b828206905092915050565b5f82356001604003833603038112616f2257616f21615e7e565b5b80830191505092915050565b5f8083356001602003843603038112616f4a57616f49615e7e565b5b80840192508235915067ffffffffffffffff821115616f6c57616f6b615e82565b5b602083019250602082023603831315616f8857616f87615e86565b5b509250929050565b7f70726f6f6620646964206e6f74207665726966790000000000000000000000005f82015250565b5f616fc46014836159fb565b9150616fcf82616f90565b602082019050919050565b5f6020820190508181035f830152616ff181616fb8565b9050919050565b5f60808201905061700b5f830187614f15565b6170186020830186614f15565b6170256040830185614f15565b6170326060830184614f15565b95945050505050565b5f819050919050565b5f61705e6170596170548461703b565b615801565b61579d565b9050919050565b61706e81617044565b82525050565b5f6020820190506170875f830184617065565b92915050565b7f4c65616620696e646578206f7574206f6620626f756e647300000000000000005f82015250565b5f6170c16018836159fb565b91506170cc8261708d565b602082019050919050565b5f6020820190508181035f8301526170ee816170b5565b9050919050565b5f6170ff82614f0c565b91505f820361711157617110615b40565b5b600182039050919050565b5f61712682614f0c565b915061713183614f0c565b92508261714157617140616eaa565b5b828204905092915050565b7f496e636f72726563742066656520616d6f756e740000000000000000000000005f82015250565b5f6171806014836159fb565b915061718b8261714c565b602082019050919050565b5f6020820190508181035f8301526171ad81617174565b9050919050565b50565b5f6171c25f83616473565b91506171cd826171b4565b5f82019050919050565b5f6171e1826171b7565b9150819050919050565b7f4275726e206661696c65640000000000000000000000000000000000000000005f82015250565b5f61721f600b836159fb565b915061722a826171eb565b602082019050919050565b5f6020820190508181035f83015261724c81617213565b9050919050565b7f53697a65206d7573742062652061206d756c7469706c65206f662033320000005f82015250565b5f617287601d836159fb565b915061729282617253565b602082019050919050565b5f6040820190506172b05f830184614f15565b81810360208301526172c18161727b565b905092915050565b7f53697a65206d7573742062652067726561746572207468616e203000000000005f82015250565b5f6172fd601b836159fb565b9150617308826172c9565b602082019050919050565b5f6040820190506173265f830184614f15565b8181036020830152617337816172f1565b905092915050565b7f526f6f742073697a65206d757374206265206c657373207468616e20325e35305f82015250565b5f6173736020836159fb565b915061737e8261733f565b602082019050919050565b5f60408201905061739c5f830184614f15565b81810360208301526173ad81617367565b905092915050565b5f6173bf82614f0c565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036173f1576173f0615b40565b5b600182019050919050565b5f808335600160200384360303811261741857617417615e7e565b5b80840192508235915067ffffffffffffffff82111561743a57617439615e82565b5b60208301925060018202360383131561745657617455615e86565b5b509250929050565b5f82905092915050565b5f819050815f5260205f209050919050565b5f6020601f8301049050919050565b5f82821b905092915050565b5f600883026174c47fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82617489565b6174ce8683617489565b95508019841693508086168417925050509392505050565b5f6175006174fb6174f684614f0c565b615801565b614f0c565b9050919050565b5f819050919050565b617519836174e6565b61752d61752582617507565b848454617495565b825550505050565b5f90565b617541617535565b61754c818484617510565b505050565b5b8181101561756f576175645f82617539565b600181019050617552565b5050565b601f8211156175b45761758581617468565b61758e8461747a565b8101602085101561759d578190505b6175b16175a98561747a565b830182617551565b50505b505050565b5f82821c905092915050565b5f6175d45f19846008026175b9565b1980831691505092915050565b5f6175ec83836175c5565b9150826002028217905092915050565b617606838361745e565b67ffffffffffffffff81111561761f5761761e61561f565b5b6176298254616409565b617634828285617573565b5f601f831160018114617661575f841561764f578287013590505b61765985826175e1565b8655506176c0565b601f19841661766f86617468565b5f5b8281101561769657848901358255600182019150602085019450602081019050617671565b868310156176b357848901356176af601f8916826175c5565b8355505b6001600288020188555050505b50505050505050565b6176d48383836175fc565b505050565b5f81015f83016176e981856173fc565b6176f48183866176c9565b505050505050565b61770682826176d9565b5050565b7f6661696c656420746f2076616c69646174653a20657374696d617465642067615f8201527f7320666565206d7573742062652067726561746572207468616e203000000000602082015250565b5f617764603c836159fb565b915061776f8261770a565b604082019050919050565b5f6020820190508181035f83015261779181617758565b9050919050565b7f6661696c656420746f2076616c69646174653a204174746f46494c20707269635f8201527f65206d7573742062652067726561746572207468616e20300000000000000000602082015250565b5f6177f26038836159fb565b91506177fd82617798565b604082019050919050565b5f6020820190508181035f83015261781f816177e6565b9050919050565b7f6661696c656420746f2076616c69646174653a207261772073697a65206d75735f8201527f742062652067726561746572207468616e203000000000000000000000000000602082015250565b5f6178806033836159fb565b915061788b82617826565b604082019050919050565b5f6020820190508181035f8301526178ad81617874565b9050919050565b5f8160011c9050919050565b5f808291508390505b6001851115617909578086048111156178e5576178e4615b40565b5b60018516156178f45780820291505b8081029050617902856178b4565b94506178c9565b94509492505050565b5f8261792157600190506179dc565b8161792e575f90506179dc565b8160018114617944576002811461794e5761797d565b60019150506179dc565b60ff8411156179605761795f615b40565b5b8360020a91508482111561797757617976615b40565b5b506179dc565b5060208310610133831016604e8410600b84101617156179b25782820a9050838111156179ad576179ac615b40565b5b6179dc565b6179bf84848460016178c0565b925090508184048111156179d6576179d5615b40565b5b81810290505b9392505050565b5f63ffffffff82169050919050565b5f6179fc82614f0c565b9150617a07836179e3565b9250617a347fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8484617912565b905092915050565b5f617a46826157bf565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffff800000008203617a7857617a77615b40565b5b815f039050919050565b617a8b81615354565b8114617a95575f80fd5b50565b5f81519050617aa681617a82565b92915050565b5f60208284031215617ac157617ac0614f46565b5b5f617ace84828501617a98565b91505092915050565b7f436964206461746120697320746f6f2073686f727400000000000000000000005f82015250565b5f617b0b6015836159fb565b9150617b1682617ad7565b602082019050919050565b5f6020820190508181035f830152617b3881617aff565b9050919050565b5f819050602082019050919050565b5f617b598251615354565b80915050919050565b5f617b6c8261547f565b82617b7684617b3f565b9050617b8181617b4e565b92506020821015617bc157617bbc7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff83602003600802617489565b831692505b5050919050565b5f606082019050617bdb5f830186614f15565b617be860208301856157b0565b617bf560408301846157cb565b949350505050565b7f496e7075742065786365656473206d6178696d756d20696e743235362076616c5f8201527f7565000000000000000000000000000000000000000000000000000000000000602082015250565b5f617c576022836159fb565b9150617c6282617bfd565b604082019050919050565b5f6020820190508181035f830152617c8481617c4b565b9050919050565b5f819050919050565b5f617c9e82617c8b565b91507f80000000000000000000000000000000000000000000000000000000000000008203617cd057617ccf615b40565b5b815f03905091905056fea264697066735822122033620393606d94f1c8d591222f06f6214eafe22531657caf6478a039d4cfa51864736f6c63430008170033","sourceMap":"1708:31540:17:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;2142:50;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;31969:511;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;13160:1027;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;1806:79;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;15412:981;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;2046:90;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;1935:47;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;2390:114;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;9892:181;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;17418:922;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;10634:196;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;9181:183;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;23615:406;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;25066:2013;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;2510:50;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;10149:216;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;;:::i;:::-;;;;;;;;8404:186;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;22003:452;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;4161:214:2;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;32769:477:17;;;;;;;;;;;;;:::i;:::-;;;;;;;;:::i;:::-;;;;;;;;3708:134:2;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;2198:46:17;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;2250:78;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;9649:185;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;12021:517;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;11506:406;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;3155:101:0;;;;;;;;;;;;;:::i;:::-;;8689:441:17;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;14259:848;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;11220:177;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;2441:144:0;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;7976:96:17;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;10905:200;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;1988:52;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;1819:58:2;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;1891:38:17;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;9418:169;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;12544:430;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;2566:43;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;3405:215:0;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;18696:3301:17;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;8149:148;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;7830:103;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;10371:195;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;:::i;:::-;;;;;;;;7269:192;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;2142:50;2188:4;2142:50;:::o;31969:511::-;32057:24;32183:11;32203:29;32214:10;:17;32225:5;32214:17;;;;;;;;;;;;32203:10;:29::i;:::-;32197:3;:35;;;;:::i;:::-;32183:49;;32242:31;32298:10;;:17;;32276:40;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;32242:74;;32331:9;32326:125;32350:10;;:17;;32346:1;:21;32326:125;;;32400:40;32414:5;32421:10;;32432:1;32421:13;;;;;;;:::i;:::-;;;;;;;;32436:3;32400:13;:40::i;:::-;32388:6;32395:1;32388:9;;;;;;;;:::i;:::-;;;;;;;:52;;;;32369:3;;;;;;;32326:125;;;;32467:6;32460:13;;;;31969:511;;;;;:::o;13160:1027::-;13256:7;2188:4;13283:9;;:16;;:39;;13275:72;;;;;;;;;;;;:::i;:::-;;;;;;;;;13357:16;13376:18;:16;:18::i;:::-;13357:37;;13425:8;13412:9;:21;;13404:51;;;;;;;;;;;;:::i;:::-;;;;;;;;;13465:17;13473:8;13465:7;:17::i;:::-;13508:8;13496:9;:20;13492:139;;;13578:10;13570:28;;:50;13611:8;13599:9;:20;;;;:::i;:::-;13570:50;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;13492:139;13641:13;13657:14;;:16;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;13641:32;;;;13710:1;13683:17;:24;13701:5;13683:24;;;;;;;;;;;:28;;;;2559:1;13721:18;:25;13740:5;13721:25;;;;;;;;;;;:50;;;;13855:10;13832:13;:20;13846:5;13832:20;;;;;;;;;;;;:33;;;;;;;;;;;;;;;;;;13901:12;13875:16;:23;13892:5;13875:23;;;;;;;;;;;;:38;;;;;;;;;;;;;;;;;;2608:1;13923:23;:30;13947:5;13923:30;;;;;;;;;;;:48;;;;14010:1;13986:26;;:12;:26;;;13982:128;;14040:12;14028:41;;;14070:5;14077:10;14089:9;;14028:71;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;13982:128;14147:10;14124:34;;14140:5;14124:34;;;;;;;;;;14175:5;14168:12;;;;13160:1027;;;;;:::o;1806:79::-;1843:42;1806:79;:::o;15412:981::-;15517:7;2188:4;15544:9;;:16;;:39;;15536:72;;;;;;;;;;;;:::i;:::-;;;;;;;;;15626:19;15639:5;15626:12;:19::i;:::-;15618:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;15704:1;15686:8;;:15;;:19;15678:58;;;;;;;;;;;;:::i;:::-;;;;;;;;;15778:10;15754:34;;:13;:20;15768:5;15754:20;;;;;;;;;;;;;;;;;;;;;:34;;;15746:75;;;;;;;;;;;;:::i;:::-;;;;;;;;;15831:18;15852:10;:17;15863:5;15852:17;;;;;;;;;;;;15831:38;;15879:24;15920:8;;:15;;15906:30;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;15879:57;;15953:9;15948:171;15972:8;;:15;;15968:1;:19;15948:171;;;16008:59;16019:5;16026:1;16029:8;;16038:1;16029:11;;;;;;;:::i;:::-;;;;;;;;;;;;;:::i;:::-;:16;;;;;;;;:::i;:::-;16047:8;;16056:1;16047:11;;;;;;;:::i;:::-;;;;;;;;;;;;;:::i;:::-;:19;;;16008:10;:59::i;:::-;;16107:1;16094:10;:14;;;;:::i;:::-;16081:7;16089:1;16081:10;;;;;;;;:::i;:::-;;;;;;;:27;;;;;15989:3;;;;;;;15948:171;;;;16144:5;16133:26;16151:7;16133:26;;;;;;:::i;:::-;;;;;;;;16170:20;16193:16;:23;16210:5;16193:23;;;;;;;;;;;;;;;;;;;;;16170:46;;16254:1;16230:26;;:12;:26;;;16226:133;;16284:12;16272:36;;;16309:5;16316:10;16328:8;;16338:9;;16272:76;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;16226:133;16376:10;16369:17;;;;;15412:981;;;;;;;:::o;2046:90::-;2094:42;2046:90;:::o;1935:47::-;1975:7;1935:47;:::o;2390:114::-;2438:66;2390:114;;;:::o;9892:181::-;9957:7;9984:19;9997:5;9984:12;:19::i;:::-;9976:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;10043:16;:23;10060:5;10043:23;;;;;;;;;;;;;;;;;;;;;10036:30;;9892:181;;;:::o;17418:922::-;2188:4;17538:9;;:16;;:39;;17530:72;;;;;;;;;;;;:::i;:::-;;;;;;;;;17620:19;17633:5;17620:12;:19::i;:::-;17612:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;17704:10;17680:34;;:13;:20;17694:5;17680:20;;;;;;;;;;;;;;;;;;;;;:34;;;17672:91;;;;;;;;;;;;:::i;:::-;;;;;;;;;2036:4;17798:17;:24;17816:5;17798:24;;;;;;;;;;;:31;;;;17781:7;;:14;;:48;;;;:::i;:::-;:73;;17773:144;;;;;;;;;;;;:::i;:::-;;;;;;;;;17933:9;17928:210;17952:7;;:14;;17948:1;:18;17928:210;;;18007:10;:17;18018:5;18007:17;;;;;;;;;;;;17994:7;;18002:1;17994:10;;;;;;;:::i;:::-;;;;;;;;:30;17986:86;;;;;;;;;;;;:::i;:::-;;;;;;;;;18086:17;:24;18104:5;18086:24;;;;;;;;;;;18116:7;;18124:1;18116:10;;;;;;;:::i;:::-;;;;;;;;18086:41;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;17968:3;;;;;;;17928:210;;;;18148:20;18171:16;:23;18188:5;18171:23;;;;;;;;;;;;;;;;;;;;;18148:46;;18232:1;18208:26;;:12;:26;;;18204:130;;18262:12;18250:46;;;18297:5;18304:7;;18313:9;;18250:73;;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;18204:130;17520:820;17418:922;;;;;:::o;10634:196::-;10706:15;;:::i;:::-;10741:19;10754:5;10741:12;:19::i;:::-;10733:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;10800:8;:15;10809:5;10800:15;;;;;;;;;;;:23;10816:6;10800:23;;;;;;;;;;;10793:30;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;10634:196;;;;:::o;9181:183::-;9247:7;9274:19;9287:5;9274:12;:19::i;:::-;9266:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;9333:17;:24;9351:5;9333:24;;;;;;;;;;;;9326:31;;9181:183;;;:::o;23615:406::-;23674:7;23725:12;23739:19;2094:42;23762:32;;23812:5;23795:23;;;;;;;;:::i;:::-;;;;;;;;;;;;;23762:57;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;23724:95;;;;23882:7;23874:53;;;;;;;;;;;;:::i;:::-;;;;;;;;;23996:6;23985:29;;;;;;;;;;;;:::i;:::-;23978:36;;;;23615:406;;;:::o;25066:2013::-;2188:4;25183:9;;:16;;:39;;25175:72;;;;;;;;;;;;:::i;:::-;;;;;;;;;25279:13;:20;25293:5;25279:20;;;;;;;;;;;;;;;;;;;;;25265:34;;:10;:34;;;25257:93;;;;;;;;;;;;:::i;:::-;;;;;;;;;25395:1;25368:17;:24;25386:5;25368:24;;;;;;;;;;;;:28;25360:85;;;;;;;;;;;;:::i;:::-;;;;;;;;;2608:1;25468:23;:30;25492:5;25468:30;;;;;;;;;;;;:49;25464:125;;25566:12;25533:23;:30;25557:5;25533:30;;;;;;;;;;;:45;;;;25464:125;25648:26;25677:17;:24;25695:5;25677:24;;;;;;;;;;;25648:53;;25711:34;25762:8;:15;;;;25748:30;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;25711:67;;25794:9;25789:160;25813:17;:24;25809:1;:28;25789:160;;;25881:8;25908:1;25890:8;:15;;;;:19;;;;:::i;:::-;25881:29;;;;;;;;:::i;:::-;;;;;;;;;;25858:17;25876:1;25858:20;;;;;;;;:::i;:::-;;;;;;;:52;;;;;25924:8;:14;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;25839:3;;;;;;;25789:160;;;;25959:37;25971:5;25978:17;25959:11;:37::i;:::-;26024:5;26011:38;26031:17;26011:38;;;;;;:::i;:::-;;;;;;;;26138:17;:24;26156:5;26138:24;;;;;;;;;;;;26114:14;:21;26129:5;26114:21;;;;;;;;;;;:48;;;;26208:17;;26193:12;:32;;;;:::i;:::-;26176:14;:49;26172:161;;;26241:81;;;;;;;;;;:::i;:::-;;;;;;;;26172:161;26370:14;26342:18;:25;26361:5;26342:25;;;;;;;;;;;:42;;;;26576:1;26548:17;:24;26566:5;26548:24;;;;;;;;;;;;:29;26544:211;;26612:5;26598:20;;;;;;;;;;2608:1;26632:23;:30;26656:5;26632:30;;;;;;;;;;;:48;;;;2559:1;26694:18;:25;26713:5;26694:25;;;;;;;;;;;:50;;;;26544:211;26765:20;26788:16;:23;26805:5;26788:23;;;;;;;;;;;;;;;;;;;;;26765:46;;26849:1;26825:26;;:12;:26;;;26821:171;;26879:12;26867:43;;;26911:5;26918:18;:25;26937:5;26918:25;;;;;;;;;;;;26945:17;:24;26963:5;26945:24;;;;;;;;;;;;26971:9;;26867:114;;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;26821:171;27024:5;27006:66;27031:14;27047:17;:24;27065:5;27047:24;;;;;;;;;;;;27006:66;;;;;;;:::i;:::-;;;;;;;;25165:1914;;;25066:2013;;;;:::o;2510:50::-;2559:1;2510:50;:::o;10149:216::-;10211:7;10220;10247:19;10260:5;10247:12;:19::i;:::-;10239:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;10307:13;:20;10321:5;10307:20;;;;;;;;;;;;;;;;;;;;;10329:21;:28;10351:5;10329:28;;;;;;;;;;;;;;;;;;;;;10299:59;;;;10149:216;;;:::o;8404:186::-;8474:4;8497:19;8510:5;8497:12;:19::i;:::-;:49;;;;;8529:10;:17;8540:5;8529:17;;;;;;;;;;;;8520:6;:26;8497:49;:86;;;;;8582:1;8550:14;:21;8565:5;8550:21;;;;;;;;;;;:29;8572:6;8550:29;;;;;;;;;;;;:33;8497:86;8490:93;;8404:186;;;;:::o;22003:452::-;22091:7;22110:15;22133:14;:21;22148:5;22133:21;;;;;;;;;;;;22128:2;:26;;;;:::i;:::-;22110:44;;22165:18;22185:21;22210:16;:14;:16::i;:::-;22164:62;;;;22244:204;22289:15;22318:11;22343:15;22372:7;22408:23;:30;22432:5;22408:30;;;;;;;;;;;;22393:12;:45;;;;:::i;:::-;22244:31;:204::i;:::-;22237:211;;;;;22003:452;;;;:::o;4161:214:2:-;2655:13;:11;:13::i;:::-;4276:36:::1;4294:17;4276;:36::i;:::-;4322:46;4344:17;4363:4;4322:21;:46::i;:::-;4161:214:::0;;:::o;32769:477:17:-;32816:6;32824:5;32890:34;2285:42;32927:24;;;2438:66;32965:21;;2239:5;32927:97;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;32890:134;;33060:1;33042:9;:15;;;:19;;;33034:80;;;;;;;;;;;;:::i;:::-;;;;;;;;;33206:9;:15;;;33224:9;:14;;;33191:48;;;;;32769:477;;:::o;3708:134:2:-;3777:7;2926:20;:18;:20::i;:::-;811:66:6::1;3803:32:2;;3796:39;;3708:134:::0;:::o;2198:46:17:-;2239:5;2198:46;:::o;2250:78::-;2285:42;2250:78;:::o;9649:185::-;9716:7;9743:19;9756:5;9743:12;:19::i;:::-;9735:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;9802:18;:25;9821:5;9802:25;;;;;;;;;;;;9795:32;;9649:185;;;:::o;12021:517::-;12109:19;12122:5;12109:12;:19::i;:::-;12101:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;12161:13;12177;:20;12191:5;12177:20;;;;;;;;;;;;;;;;;;;;;12161:36;;12224:10;12215:19;;:5;:19;;;12207:78;;;;;;;;;;;;:::i;:::-;;;;;;;;;12308:8;12299:17;;:5;:17;;;12295:237;;12423:21;:28;12445:5;12423:28;;;;;;;;;;;;12416:35;;;;;;;;;;;12295:237;;;12513:8;12482:21;:28;12504:5;12482:28;;;;;;;;;;;;:39;;;;;;;;;;;;;;;;;;12295:237;12091:447;12021:517;;:::o;11506:406::-;11572:16;11608:19;11621:5;11608:12;:19::i;:::-;11600:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;11660:26;11689:17;:24;11707:5;11689:24;;;;;;;;;;;11660:53;;11723:23;11763:8;:15;;;;11749:30;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;11723:56;;11794:9;11789:94;11813:8;:15;;;;11809:1;:19;11789:94;;;11861:8;11870:1;11861:11;;;;;;;;:::i;:::-;;;;;;;;;;11849:6;11856:1;11849:9;;;;;;;;:::i;:::-;;;;;;;:23;;;;;11830:3;;;;;;;11789:94;;;;11899:6;11892:13;;;;11506:406;;;:::o;3155:101:0:-;2334:13;:11;:13::i;:::-;3219:30:::1;3246:1;3219:18;:30::i;:::-;3155:101::o:0;8689:441:17:-;8767:4;8783:11;8803:29;8814:10;:17;8825:5;8814:17;;;;;;;;;;;;8803:10;:29::i;:::-;8797:3;:35;;;;:::i;:::-;8783:49;;8842:26;8871:50;8885:5;8914:1;8892:14;:21;8907:5;8892:21;;;;;;;;;;;;:23;;;;:::i;:::-;8917:3;8871:13;:50::i;:::-;8842:79;;8989:1;8953:14;:21;8968:5;8953:21;;;;;;;;;;;:33;8975:3;:10;;;8953:33;;;;;;;;;;;;:37;;;;:::i;:::-;8939:3;:10;;;:51;8931:128;;;;;;;;;;;;:::i;:::-;;;;;;;;;9076:23;9085:5;9092:6;9076:8;:23::i;:::-;:47;;;;;9113:3;:10;;;9103:6;:20;;9076:47;9069:54;;;;8689:441;;;;:::o;14259:848::-;2188:4;14349:9;;:16;;:39;;14341:72;;;;;;;;;;;;:::i;:::-;;;;;;;;;14436:14;;;;;;;;;;;14427:23;;:5;:23;14423:90;;14466:36;;;;;;;;;;:::i;:::-;;;;;;;;14423:90;14555:10;14531:34;;:13;:20;14545:5;14531:20;;;;;;;;;;;;;;;;;;;;;:34;;;14523:83;;;;;;;;;;;;:::i;:::-;;;;;;;;;14616:24;14643:17;:24;14661:5;14643:24;;;;;;;;;;;;14616:51;;14704:1;14677:17;:24;14695:5;14677:24;;;;;;;;;;;:28;;;;14746:1;14715:13;:20;14729:5;14715:20;;;;;;;;;;;;:33;;;;;;;;;;;;;;;;;;14786:1;14758:18;:25;14777:5;14758:25;;;;;;;;;;;:29;;;;2608:1;14797:23;:30;14821:5;14797:30;;;;;;;;;;;:48;;;;14856:20;14879:16;:23;14896:5;14879:23;;;;;;;;;;;;;;;;;;;;;14856:46;;14940:1;14916:26;;:12;:26;;;14912:134;;14970:12;14958:41;;;15000:5;15007:16;15025:9;;14958:77;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;14912:134;15076:5;15060:40;15083:16;15060:40;;;;;;:::i;:::-;;;;;;;;14331:776;;14259:848;;;:::o;11220:177::-;11283:7;11310:19;11323:5;11310:12;:19::i;:::-;11302:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;11369:14;:21;11384:5;11369:21;;;;;;;;;;;;11362:28;;11220:177;;;:::o;2441:144:0:-;2487:7;2506:24;2533:20;:18;:20::i;:::-;2506:47;;2570:1;:8;;;;;;;;;;;;2563:15;;;2441:144;:::o;7976:96:17:-;8026:6;8051:14;;;;;;;;;;;8044:21;;7976:96;:::o;10905:200::-;10983:7;11010:19;11023:5;11010:12;:19::i;:::-;11002:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;11069:14;:21;11084:5;11069:21;;;;;;;;;;;:29;11091:6;11069:29;;;;;;;;;;;;11062:36;;10905:200;;;;:::o;1988:52::-;2036:4;1988:52;:::o;1819:58:2:-;;;;;;;;;;;;;;;;;;;:::o;1891:38:17:-;1927:2;1891:38;:::o;9418:169::-;9477:7;9504:19;9517:5;9504:12;:19::i;:::-;9496:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;9563:10;:17;9574:5;9563:17;;;;;;;;;;;;9556:24;;9418:169;;;:::o;12544:430::-;12616:19;12629:5;12616:12;:19::i;:::-;12608:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;12708:10;12676:42;;:21;:28;12698:5;12676:28;;;;;;;;;;;;;;;;;;;;;:42;;;12668:98;;;;;;;;;;;;:::i;:::-;;;;;;;;;12776:16;12795:13;:20;12809:5;12795:20;;;;;;;;;;;;;;;;;;;;;12776:39;;12848:10;12825:13;:20;12839:5;12825:20;;;;;;;;;;;;:33;;;;;;;;;;;;;;;;;;12875:21;:28;12897:5;12875:28;;;;;;;;;;;;12868:35;;;;;;;;;;;12956:10;12918:49;;12946:8;12918:49;;12939:5;12918:49;;;;;;;;;;12598:376;12544:430;:::o;2566:43::-;2608:1;2566:43;:::o;3405:215:0:-;2334:13;:11;:13::i;:::-;3509:1:::1;3489:22;;:8;:22;;::::0;3485:91:::1;;3562:1;3534:31;;;;;;;;;;;:::i;:::-;;;;;;;;3485:91;3585:28;3604:8;3585:18;:28::i;:::-;3405:215:::0;:::o;18696:3301:17:-;18785:18;18806:9;18785:30;;18847:13;:20;18861:5;18847:20;;;;;;;;;;;;;;;;;;;;;18833:34;;:10;:34;;;18825:93;;;;;;;;;;;;:::i;:::-;;;;;;;;;18928:22;18953:18;:25;18972:5;18953:25;;;;;;;;;;;;18928:50;;19012:14;18996:12;:30;;18988:58;;;;;;;;;;;;:::i;:::-;;;;;;;;;19080:1;19064:6;;:13;;:17;19056:41;;;;;;;;;;;;:::i;:::-;;;;;;;;;2559:1;19115:14;:40;19107:75;;;;;;;;;;;;:::i;:::-;;;;;;;;;19192:35;19252:6;;:13;;19230:36;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;19192:74;;19285:12;19300:24;19318:5;19300:17;:24::i;:::-;19285:39;;19334:17;19354:14;:21;19369:5;19354:21;;;;;;;;;;;;19334:41;;19385:18;19412:29;19423:10;:17;19434:5;19423:17;;;;;;;;;;;;19412:10;:29::i;:::-;19406:3;:35;;;;:::i;:::-;19385:56;;19456:8;19451:1695;19474:6;;:13;;19470:1;:17;;;19451:1695;;;20584:20;20624:4;20630:5;20637:1;20607:32;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;20584:55;;20653:20;20706:9;20694:7;20684:18;;;;;;20676:27;;:39;;;;:::i;:::-;20653:62;;20840:46;20854:5;20861:12;20875:10;20840:13;:46::i;:::-;20824:10;20835:1;20824:13;;;;;;;;;;:::i;:::-;;;;;;;:62;;;;20900:16;20919:59;20938:39;20949:5;20956:10;20967:1;20956:13;;;;;;;;;;:::i;:::-;;;;;;;;:20;;;20938:10;:39::i;:::-;20919:18;:59::i;:::-;20900:78;;20992:7;21002:84;21022:6;;21029:1;21022:9;;;;;;;;;:::i;:::-;;;;;;;;;;;;;:::i;:::-;:15;;;;;;;;:::i;:::-;21002:84;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;21039:8;21049:6;;21056:1;21049:9;;;;;;;;;:::i;:::-;;;;;;;;;;;;;:::i;:::-;:14;;;21065:10;21076:1;21065:13;;;;;;;;;;:::i;:::-;;;;;;;;:20;;;21002:19;:84::i;:::-;20992:94;;21108:2;21100:35;;;;;;;;;;;;:::i;:::-;;;;;;;;;19494:1652;;;;19489:3;;;;;:::i;:::-;;;;19451:1695;;;;21526:15;21611:4;21605:2;21573:29;21595:6;;21573:21;:29::i;:::-;:34;;;;:::i;:::-;21572:43;;;;:::i;:::-;21558:9;21545:10;:22;;;;:::i;:::-;21544:72;;;;:::i;:::-;21526:90;;21626:40;21651:5;21658:7;21626:24;:40::i;:::-;21677:20;21700:16;:23;21717:5;21700:23;;;;;;;;;;;;;;;;;;;;;21677:46;;21761:1;21737:26;;:12;:26;;;21733:153;;21791:12;21779:42;;;21822:5;21829:17;:24;21847:5;21829:24;;;;;;;;;;;;21855:4;21861:6;;:13;;21779:96;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;21733:153;21928:12;21895:23;:30;21919:5;21895:30;;;;;;;;;;;:45;;;;21972:5;21955:35;21979:10;21955:35;;;;;;:::i;:::-;;;;;;;;18775:3222;;;;;;;;18696:3301;;;:::o;8149:148::-;8207:4;8238:14;;;;;;;;;;;8230:22;;:5;:22;:60;;;;;8288:1;8256:34;;:13;:20;8270:5;8256:20;;;;;;;;;;;;;;;;;;;;;:34;;;;8230:60;8223:67;;8149:148;;;:::o;7830:103::-;7883:7;7909:17;;7902:24;;7830:103;:::o;10371:195::-;10443:7;10470:19;10483:5;10470:12;:19::i;:::-;10462:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;10529:23;:30;10553:5;10529:30;;;;;;;;;;;;10522:37;;10371:195;;;:::o;7269:192::-;4158:30:1;4191:26;:24;:26::i;:::-;4158:59;;4279:19;4302:1;:15;;;;;;;;;;;;4301:16;4279:38;;4327:18;4348:1;:14;;;;;;;;;;;;4327:35;;4706:17;4741:1;4726:11;:16;;;:34;;;;;4746:14;4726:34;4706:54;;4770:17;4805:1;4790:11;:16;;;:50;;;;;4839:1;4818:4;4810:25;;;:30;4790:50;4770:70;;4856:12;4855:13;:30;;;;;4873:12;4872:13;4855:30;4851:91;;;4908:23;;;;;;;;;;;;;;4851:91;4968:1;4951;:14;;;:18;;;;;;;;;;;;;;;;;;4983:14;4979:67;;;5031:4;5013:1;:15;;;:22;;;;;;;;;;;;;;;;;;4979:67;7346:26:17::1;7361:10;7346:14;:26::i;:::-;7382:24;:22;:24::i;:::-;7436:18;7416:17;:38;;;;5070:14:1::0;5066:101;;;5118:5;5100:1;:15;;;:23;;;;;;;;;;;;;;;;;;5142:14;5154:1;5142:14;;;;;;:::i;:::-;;;;;;;;5066:101;4092:1081;;;;;7269:192:17;:::o;189:563:14:-;236:7;255:9;267:3;255:15;;280:9;309:3;304:1;:8;;300:12;;323:1;318;:6;314:32;;333:3;328:8;;;;;:::i;:::-;;;342:1;338:5;;314:32;364:2;359:1;:7;;355:11;;378:1;373;:6;369:32;;388:2;383:7;;;;;:::i;:::-;;;397:1;393:5;;369:32;419:2;414:1;:7;;410:11;;433:1;428;:6;424:32;;443:2;438:7;;;;;:::i;:::-;;;452:1;448:5;;424:32;474:2;469:1;:7;;465:11;;488:1;483;:6;479:32;;498:2;493:7;;;;;:::i;:::-;;;507:1;503:5;;479:32;529:1;524;:6;;520:10;;543:1;538;:6;534:32;;553:1;548:6;;;;;:::i;:::-;;;562:1;558:5;;534:32;584:1;579;:6;;575:10;;598:1;593;:6;589:32;;608:1;603:6;;;;;:::i;:::-;;;617:1;613:5;;589:32;639:1;634;:6;;630:10;;653:1;648;:6;644:32;;663:1;658:6;;;;;:::i;:::-;;;672:1;668:5;;644:32;694:1;689;:6;;685:10;;708:1;703;:6;699:24;;722:1;718;:5;;;;:::i;:::-;711:12;;;;;;699:24;744:1;740;:5;;;;:::i;:::-;733:12;;;;189:563;;;;:::o;30599:1307:17:-;30692:22;;:::i;:::-;30746:17;:24;30764:5;30746:24;;;;;;;;;;;;30734:9;:36;30726:73;;;;;;;;;;;;:::i;:::-;;;;;;;;;30809:17;30842:1;30835:3;30830:1;:8;;30829:14;;;;:::i;:::-;30809:34;;30853:11;30973:17;31005:9;31017:3;31005:15;;31000:616;31026:1;31022;:5;31000:616;;;31169:10;:17;31180:5;31169:17;;;;;;;;;;;;31156:9;:30;31152:120;;31229:1;31225;:5;;;;:::i;:::-;31219:1;:12;;31206:25;;;;;:::i;:::-;;;31249:8;;31152:120;31304:13;:20;31318:5;31304:20;;;;;;;;;;;:31;31325:9;31304:31;;;;;;;;;;;;31298:3;:37;;;;:::i;:::-;31286:49;;31390:9;31377;:22;31373:233;;31426:13;:20;31440:5;31426:20;;;;;;;;;;;:31;31447:9;31426:31;;;;;;;;;;;;31419:38;;;;;:::i;:::-;;;31498:1;31494;:5;;;;:::i;:::-;31488:1;:12;;31475:25;;;;;:::i;:::-;;;31373:233;;;31589:1;31585;:5;;;;:::i;:::-;31579:1;:12;;31566:25;;;;;:::i;:::-;;;31373:233;31000:616;31029:3;;;;;:::i;:::-;;;;31000:616;;;;31643:13;:20;31657:5;31643:20;;;;;;;;;;;:31;31664:9;31643:31;;;;;;;;;;;;31637:3;:37;;;;:::i;:::-;31625:49;;31701:9;31688;:22;31684:141;;31761:53;;;;;;;;31789:1;31777:9;:13;;;;:::i;:::-;31761:53;;;;31804:9;31792;:21;;;;:::i;:::-;31761:53;;;31754:60;;;;;;;31684:141;31856:43;;;;;;;;31872:9;31856:43;;;;31895:3;31883:9;:15;;;;:::i;:::-;31856:43;;;31849:50;;;;;30599:1307;;;;;;:::o;3623:85:16:-;3666:7;354:2;228:1;270:4;:15;;;;:::i;:::-;336:20;;;;:::i;:::-;3685:16;;3623:85;:::o;7557:215:17:-;7630:6;7617:9;:19;;7609:52;;;;;;;;;;;;:::i;:::-;;;;;;;;;7672:12;1843:42;7690:15;;7713:6;7690:34;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;7671:53;;;7742:7;7734:31;;;;;;;;;;;;:::i;:::-;;;;;;;;;7599:173;7557:215;:::o;16449:793::-;16560:7;16606:1;1927:2;16583:7;:19;;;;:::i;:::-;:24;16579:116;;16643:7;16630:54;;;;;;;;;;;:::i;:::-;;;;;;;;16579:116;16719:1;16708:7;:12;16704:102;;16756:7;16743:52;;;;;;;;;;;:::i;:::-;;;;;;;;16704:102;1975:7;16819;:23;16815:118;;;16878:7;16865:57;;;;;;;;;;;:::i;:::-;;;;;;;;16815:118;16943:17;1927:2;16963:7;:19;;;;:::i;:::-;16943:39;;16992:14;17009:10;:17;17020:5;17009:17;;;;;;;;;;;;:19;;;;;;;;;:::i;:::-;;;;;16992:36;;17038;17049:5;17056:9;17067:6;17038:10;:36::i;:::-;17110:4;17084:8;:15;17093:5;17084:15;;;;;;;;;;;:23;17100:6;17084:23;;;;;;;;;;;:30;;;;;;:::i;:::-;;;;17156:9;17124:14;:21;17139:5;17124:21;;;;;;;;;;;:29;17146:6;17124:29;;;;;;;;;;;:41;;;;17203:9;17175:17;:24;17193:5;17175:24;;;;;;;;;;;;:37;;;;;;;:::i;:::-;;;;;;;;17229:6;17222:13;;;;16449:793;;;;;;:::o;27132:342::-;27221:19;27234:5;27221:12;:19::i;:::-;27213:50;;;;;;;;;;;;:::i;:::-;;;;;;;;;27273:18;27310:9;27305:115;27329:7;:14;27325:1;:18;27305:115;;;27377:32;27391:5;27398:7;27406:1;27398:10;;;;;;;;:::i;:::-;;;;;;;;27377:13;:32::i;:::-;27363:46;;;;;:::i;:::-;;;27345:3;;;;;;;27305:115;;;;27457:10;27429:17;:24;27447:5;27429:24;;;;;;;;;;;;:38;;;;;;;:::i;:::-;;;;;;;;27203:271;27132:342;;:::o;1662:1834:16:-;1885:7;1938:1;1920:15;:19;:41;;;;1960:1;1943:13;:18;1920:41;1912:114;;;;;;;;;;;;:::i;:::-;;;;;;;;;2058:1;2044:11;:15;;;2036:84;;;;;;;;;;;;:::i;:::-;;;;;;;;;2148:1;2138:7;:11;2130:75;;;;;;;;;;;;:::i;:::-;;;;;;;;;2276:29;2338:1;2319:15;:20;;;2315:452;;2522:15;2509:2;:29;;;;:::i;:::-;2494:11;2460:45;;1018:5;903:7;2460:31;;;;:::i;:::-;:45;;;;:::i;:::-;:79;;;;:::i;:::-;228:1;270:4;:15;;;;:::i;:::-;496:1;2380:58;;;;:::i;:::-;2379:161;;;;:::i;:::-;2355:185;;2315:452;;;2744:11;2710:45;;1018:5;903:7;2710:31;;;;:::i;:::-;:45;;;;:::i;:::-;2672:15;2671:16;;;:::i;:::-;2658:2;:30;;;;:::i;:::-;228:1;270:4;:15;;;;:::i;:::-;496:1;2596:58;;;;:::i;:::-;:93;;;;:::i;:::-;2595:161;;;;:::i;:::-;2571:185;;2315:452;2834:30;2906:7;2891:12;2867:21;:36;;;;:::i;:::-;:46;;;;:::i;:::-;2834:79;;2956:21;3036:3;792:1;2981:22;:51;;;;:::i;:::-;2980:59;;;;:::i;:::-;2956:83;;3049:20;3127:3;682:1;3073:22;:50;;;;:::i;:::-;3072:58;;;;:::i;:::-;3049:81;;3164:13;3145:15;:32;3141:349;;3200:1;3193:8;;;;;;;;3141:349;3289:12;3270:15;:31;3266:224;;3340:15;3324:13;:31;;;;:::i;:::-;3317:38;;;;;;;;3266:224;3476:3;574:1;3427:22;:45;;;;:::i;:::-;3426:53;;;;:::i;:::-;3419:60;;;;;;1662:1834;;;;;;;;:::o;4603:312:2:-;4692:6;4675:23;;4683:4;4675:23;;;:120;;;;4789:6;4753:42;;:32;:30;:32::i;:::-;:42;;;;4675:120;4658:251;;;4869:29;;;;;;;;;;;;;;4658:251;4603:312::o;7467:84:17:-;2334:13:0;:11;:13::i;:::-;7467:84:17;:::o;6057:538:2:-;6174:17;6156:50;;;:52;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;6152:437;;6560:17;6518:60;;;;;;;;;;;:::i;:::-;;;;;;;;6152:437;811:66:6;6258:32:2;;6250:4;:40;6246:120;;6346:4;6317:34;;;;;;;;;;;:::i;:::-;;;;;;;;6246:120;6379:54;6409:17;6428:4;6379:29;:54::i;:::-;6209:235;6057:538;;:::o;5032:213::-;5115:6;5098:23;;5106:4;5098:23;;;5094:145;;5199:29;;;;;;;;;;;;;;5094:145;5032:213::o;2658:162:0:-;2728:12;:10;:12::i;:::-;2717:23;;:7;:5;:7::i;:::-;:23;;;2713:101;;2790:12;:10;:12::i;:::-;2763:40;;;;;;;;;;;:::i;:::-;;;;;;;;2713:101;2658:162::o;3774:248::-;3847:24;3874:20;:18;:20::i;:::-;3847:47;;3904:16;3923:1;:8;;;;;;;;;;;;3904:27;;3952:8;3941:1;:8;;;:19;;;;;;;;;;;;;;;;;;4006:8;3975:40;;3996:8;3975:40;;;;;;;;;;;;3837:185;;3774:248;:::o;1192:159::-;1244:24;1313:22;1303:32;;1192:159;:::o;24028:138:17:-;24093:7;24119:40;24133:18;:25;24152:5;24133:25;;;;;;;;;;;;24119:13;:40::i;:::-;24112:47;;24028:138;;;:::o;274:339:15:-;336:7;382:2;363:3;:8;;;:15;:21;;355:55;;;;;;;;;;;;:::i;:::-;;;;;;;;;420:22;455:2;445:13;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;420:38;;473:6;468:104;489:2;485:1;:6;468:104;;;527:3;:8;;;559:1;554:2;536:3;:8;;;:15;:20;;;;:::i;:::-;:24;;;;:::i;:::-;527:34;;;;;;;;:::i;:::-;;;;;;;;;;512:9;522:1;512:12;;;;;;;;:::i;:::-;;;;;:49;;;;;;;;;;;493:3;;;;;;;468:104;;;;596:9;588:18;;;:::i;:::-;581:25;;;274:339;;;:::o;951:188:18:-;1060:4;1128;1083:41;1102:5;1109:4;1115:8;1083:18;:41::i;:::-;:49;1076:56;;951:188;;;;;;:::o;23225:384:17:-;23304:7;23323:20;23346:1;23323:24;;23362:9;23357:217;23381:6;;:13;;23377:1;:17;23357:217;;;23560:2;23535:6;;23542:1;23535:9;;;;;;;:::i;:::-;;;;;;;;;;;;;:::i;:::-;:15;;;;;;;;:::i;:::-;:22;;:27;;;;:::i;:::-;23529:2;:34;;;;:::i;:::-;23513:50;;;;;:::i;:::-;;;23396:3;;;;;;;23357:217;;;;23590:12;23583:19;;;23225:384;;;;:::o;22461:758::-;22546:23;22582:13;22572:7;:23;;;;:::i;:::-;22546:49;;22605:15;22628:14;:21;22643:5;22628:21;;;;;;;;;;;;22623:2;:26;;;;:::i;:::-;22605:44;;22660:18;22680:21;22705:16;:14;:16::i;:::-;22659:62;;;;22732:16;22751:204;22796:15;22825:11;22850:15;22879:7;22915:23;:30;22939:5;22915:30;;;;;;;;;;;;22900:12;:45;;;;:::i;:::-;22751:31;:204::i;:::-;22732:223;;22973:17;22981:8;22973:7;:17::i;:::-;23016:8;23004:9;:20;23000:139;;;23086:10;23078:28;;:50;23119:8;23107:9;:20;;;;:::i;:::-;23078:50;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;23000:139;23166:5;23153:59;23173:8;23183:11;23196:15;23153:59;;;;;;;;:::i;:::-;;;;;;;;22536:683;;;;;22461:758;;:::o;8737:170:1:-;8795:30;8870:21;8860:31;;8737:170;:::o;1847:127:0:-;6931:20:1;:18;:20::i;:::-;1929:38:0::1;1954:12;1929:24;:38::i;:::-;1847:127:::0;:::o;2970:67:2:-;6931:20:1;:18;:20::i;:::-;2970:67:2:o;29480:420:17:-;29565:13;29581:6;29565:22;;29597:9;29609:22;29625:5;29609:15;:22::i;:::-;29597:34;;29642:11;29656:5;29642:19;;29726:9;29721:129;29745:1;29741;:5;29721:129;;;29767:9;29793:1;29788;:6;;29779:5;:16;;;;:::i;:::-;29767:28;;29816:13;:20;29830:5;29816:20;;;;;;;;;;;:23;29837:1;29816:23;;;;;;;;;;;;29809:30;;;;;:::i;:::-;;;29753:97;29748:3;;;;;;;29721:129;;;;29890:3;29859:13;:20;29873:5;29859:20;;;;;;;;;;;:28;29880:6;29859:28;;;;;;;;;;;:34;;;;29555:345;;;29480:420;;;:::o;27648:296::-;27720:7;27739:13;27755:14;:21;27770:5;27755:21;;;;;;;;;;;:29;27777:6;27755:29;;;;;;;;;;;;27739:45;;27794:35;27808:5;27815:6;27823:5;27794:13;:35::i;:::-;27846:14;:21;27861:5;27846:21;;;;;;;;;;;:29;27868:6;27846:29;;;;;;;;;;;27839:36;;;27892:8;:15;27901:5;27892:15;;;;;;;;;;;:23;27908:6;27892:23;;;;;;;;;;;;27885:30;;;;;;;;:::i;:::-;;;27932:5;27925:12;;;27648:296;;;;:::o;1441:138:6:-;1493:7;1519:47;811:66;1546:19;;1519:26;:47::i;:::-;:53;;;;;;;;;;;;1512:60;;1441:138;:::o;2264:344::-;2355:37;2374:17;2355:18;:37::i;:::-;2425:17;2407:36;;;;;;;;;;;;2472:1;2458:4;:11;:15;2454:148;;;2489:53;2518:17;2537:4;2489:28;:53::i;:::-;;2454:148;;;2573:18;:16;:18::i;:::-;2454:148;2264:344;;:::o;887:96:3:-;940:7;966:10;959:17;;887:96;:::o;1422:633:18:-;1529:7;1548:20;1571:4;1548:27;;1590:9;1585:435;1609:5;:12;1605:1;:16;1585:435;;;1736:15;1754:5;1760:1;1754:8;;;;;;;;:::i;:::-;;;;;;;;1736:26;;1796:1;1791;1780:8;:12;;;;:::i;:::-;:17;1776:207;;1832:41;1851:12;1865:7;1832:18;:41::i;:::-;1817:56;;1776:207;;;1927:41;1946:7;1955:12;1927:18;:41::i;:::-;1912:56;;1776:207;2008:1;1996:13;;;;;:::i;:::-;;;1628:392;1623:3;;;;;;;1585:435;;;;2036:12;2029:19;;;1422:633;;;;;:::o;7084:141:1:-;7151:17;:15;:17::i;:::-;7146:73;;7191:17;;;;;;;;;;;;;;7146:73;7084:141::o;1980:235:0:-;6931:20:1;:18;:20::i;:::-;2100:1:0::1;2076:26;;:12;:26;;::::0;2072:95:::1;;2153:1;2125:31;;;;;;;;;;;:::i;:::-;;;;;;;;2072:95;2176:32;2195:12;2176:18;:32::i;:::-;1980:235:::0;:::o;32605:117:17:-;32668:7;32694:21;32713:1;32705:5;:9;;;;:::i;:::-;32694:10;:21::i;:::-;32687:28;;32605:117;;;:::o;29944:537::-;30031:11;30059:29;30070:10;:17;30081:5;30070:17;;;;;;;;;;;;30059:10;:29::i;:::-;30053:3;:35;;;;:::i;:::-;30031:58;;30099:9;30119:22;30135:5;30119:15;:22::i;:::-;30099:43;;30299:176;30311:3;30306:1;:8;;:37;;;;;30326:10;:17;30337:5;30326:17;;;;;;;;;;;;30318:5;:25;30306:37;30299:176;;;30390:5;30359:13;:20;30373:5;30359:20;;;;;;;;;;;:27;30380:5;30359:27;;;;;;;;;;;;:36;;;;;;;:::i;:::-;;;;;;;;30423:1;30418;:6;;30409:15;;;;;:::i;:::-;;;30442:22;30458:5;30442:15;:22::i;:::-;30438:26;;30299:176;;;30021:460;;29944:537;;;:::o;1899:163:10:-;1960:21;2042:4;2032:14;;1899:163;;;:::o;1671:281:6:-;1781:1;1748:17;:29;;;:34;1744:119;;1834:17;1805:47;;;;;;;;;;;:::i;:::-;;;;;;;;1744:119;1928:17;1872:47;811:66;1899:19;;1872:26;:47::i;:::-;:53;;;:73;;;;;;;;;;;;;;;;;;1671:281;:::o;3900:253:8:-;3983:12;4008;4022:23;4049:6;:19;;4069:4;4049:25;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;4007:67;;;;4091:55;4118:6;4126:7;4135:10;4091:26;:55::i;:::-;4084:62;;;;3900:253;;;;:::o;6113:122:6:-;6175:1;6163:9;:13;6159:70;;;6199:19;;;;;;;;;;;;;;6159:70;6113:122::o;11407:121:18:-;11473:7;11499:22;11516:1;11519;11499:16;:22::i;:::-;11492:29;;11407:121;;;;:::o;8487:120:1:-;8537:4;8560:26;:24;:26::i;:::-;:40;;;;;;;;;;;;8553:47;;8487:120;:::o;1606:793:14:-;1653:7;1693:16;1680:1;:30;;1672:77;;;;;;;;;;;;:::i;:::-;;;;;;;;;1759:9;1771:3;1759:15;;1786:8;1805:1;1797:10;;;:::i;:::-;1786:21;;1832:1;1821;:13;1817:17;;1853:1;1848;:6;1844:40;;1870:3;;;;;:::i;:::-;;;;1844:40;1912:1;784:66;1897:1;:11;:16;1893:55;;1934:3;1929:8;;;;;:::i;:::-;;;1893:55;1975:1;881:66;1961:1;:10;:15;1957:53;;1997:2;1992:7;;;;;:::i;:::-;;;1957:53;2037:1;978:66;2023:1;:10;:15;2019:53;;2059:2;2054:7;;;;;:::i;:::-;;;2019:53;2099:1;1075:66;2085:1;:10;:15;2081:53;;2121:2;2116:7;;;;;:::i;:::-;;;2081:53;2160:1;1171:66;2147:1;:9;:14;2143:51;;2182:1;2177:6;;;;;:::i;:::-;;;2143:51;2220:1;1267:66;2207:1;:9;:14;2203:51;;2242:1;2237:6;;;;;:::i;:::-;;;2203:51;2280:1;1363:66;2267:1;:9;:14;2263:51;;2302:1;2297:6;;;;;:::i;:::-;;;2263:51;2340:1;1459:66;2327:1;:9;:14;2323:51;;2362:1;2357:6;;;;;:::i;:::-;;;2323:51;2391:1;2384:8;;;;1606:793;;;:::o;4421:582:8:-;4565:12;4594:7;4589:408;;4617:19;4625:10;4617:7;:19::i;:::-;4589:408;;;4862:1;4841:10;:17;:22;:49;;;;;4889:1;4867:6;:18;;;:23;4841:49;4837:119;;;4934:6;4917:24;;;;;;;;;;;:::i;:::-;;;;;;;;4837:119;4976:10;4969:17;;;;4589:408;4421:582;;;;;;:::o;11645:532:18:-;11715:13;11792:1;11786:4;11779:15;11820:1;11814:4;11807:15;11941:4;11935;11929;11923;11918:3;11911:5;11900:46;11890:102;;11976:1;11973;11966:12;11890:102;12033:4;12027:11;12018:20;;12149:11;12142:5;12138:23;12129:32;;11645:532;;;;:::o;5543:487:8:-;5694:1;5674:10;:17;:21;5670:354;;;5871:10;5865:17;5927:15;5914:10;5910:2;5906:19;5899:44;5670:354;5994:19;;;;;;;;;;;;;;-1:-1:-1;;;;;;;;;;;;;;;;;;;:::o;:::-;;;;;;;;;;;;;;:::o;:::-;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;:::o;:::-;;;;;;;;;;;;;;;;;;;;;:::o;7:77:20:-;44:7;73:5;62:16;;7:77;;;:::o;90:118::-;177:24;195:5;177:24;:::i;:::-;172:3;165:37;90:118;;:::o;214:222::-;307:4;345:2;334:9;330:18;322:26;;358:71;426:1;415:9;411:17;402:6;358:71;:::i;:::-;214:222;;;;:::o;442:75::-;475:6;508:2;502:9;492:19;;442:75;:::o;523:117::-;632:1;629;622:12;646:117;755:1;752;745:12;769:122;842:24;860:5;842:24;:::i;:::-;835:5;832:35;822:63;;881:1;878;871:12;822:63;769:122;:::o;897:139::-;943:5;981:6;968:20;959:29;;997:33;1024:5;997:33;:::i;:::-;897:139;;;;:::o;1042:117::-;1151:1;1148;1141:12;1165:117;1274:1;1271;1264:12;1288:117;1397:1;1394;1387:12;1428:568;1501:8;1511:6;1561:3;1554:4;1546:6;1542:17;1538:27;1528:122;;1569:79;;:::i;:::-;1528:122;1682:6;1669:20;1659:30;;1712:18;1704:6;1701:30;1698:117;;;1734:79;;:::i;:::-;1698:117;1848:4;1840:6;1836:17;1824:29;;1902:3;1894:4;1886:6;1882:17;1872:8;1868:32;1865:41;1862:128;;;1909:79;;:::i;:::-;1862:128;1428:568;;;;;:::o;2002:704::-;2097:6;2105;2113;2162:2;2150:9;2141:7;2137:23;2133:32;2130:119;;;2168:79;;:::i;:::-;2130:119;2288:1;2313:53;2358:7;2349:6;2338:9;2334:22;2313:53;:::i;:::-;2303:63;;2259:117;2443:2;2432:9;2428:18;2415:32;2474:18;2466:6;2463:30;2460:117;;;2496:79;;:::i;:::-;2460:117;2609:80;2681:7;2672:6;2661:9;2657:22;2609:80;:::i;:::-;2591:98;;;;2386:313;2002:704;;;;;:::o;2712:147::-;2812:6;2846:5;2840:12;2830:22;;2712:147;;;:::o;2865:217::-;2997:11;3031:6;3026:3;3019:19;3071:4;3066:3;3062:14;3047:29;;2865:217;;;;:::o;3088:165::-;3188:4;3211:3;3203:11;;3241:4;3236:3;3232:14;3224:22;;3088:165;;;:::o;3259:108::-;3336:24;3354:5;3336:24;:::i;:::-;3331:3;3324:37;3259:108;;:::o;3453:517::-;3606:4;3601:3;3597:14;3695:4;3688:5;3684:16;3678:23;3714:63;3771:4;3766:3;3762:14;3748:12;3714:63;:::i;:::-;3621:166;3871:4;3864:5;3860:16;3854:23;3890:63;3947:4;3942:3;3938:14;3924:12;3890:63;:::i;:::-;3797:166;3575:395;3453:517;;:::o;3976:311::-;4111:10;4132:112;4240:3;4232:6;4132:112;:::i;:::-;4276:4;4271:3;4267:14;4253:28;;3976:311;;;;:::o;4293:146::-;4396:4;4428;4423:3;4419:14;4411:22;;4293:146;;;:::o;4529:996::-;4714:3;4743:87;4824:5;4743:87;:::i;:::-;4846:119;4958:6;4953:3;4846:119;:::i;:::-;4839:126;;4989:89;5072:5;4989:89;:::i;:::-;5101:7;5132:1;5117:383;5142:6;5139:1;5136:13;5117:383;;;5218:6;5212:13;5245:129;5370:3;5355:13;5245:129;:::i;:::-;5238:136;;5397:93;5483:6;5397:93;:::i;:::-;5387:103;;5177:323;5164:1;5161;5157:9;5152:14;;5117:383;;;5121:14;5516:3;5509:10;;4719:806;;;4529:996;;;;:::o;5531:505::-;5740:4;5778:2;5767:9;5763:18;5755:26;;5827:9;5821:4;5817:20;5813:1;5802:9;5798:17;5791:47;5855:174;6024:4;6015:6;5855:174;:::i;:::-;5847:182;;5531:505;;;;:::o;6042:126::-;6079:7;6119:42;6112:5;6108:54;6097:65;;6042:126;;;:::o;6174:96::-;6211:7;6240:24;6258:5;6240:24;:::i;:::-;6229:35;;6174:96;;;:::o;6276:122::-;6349:24;6367:5;6349:24;:::i;:::-;6342:5;6339:35;6329:63;;6388:1;6385;6378:12;6329:63;6276:122;:::o;6404:139::-;6450:5;6488:6;6475:20;6466:29;;6504:33;6531:5;6504:33;:::i;:::-;6404:139;;;;:::o;6562:552::-;6619:8;6629:6;6679:3;6672:4;6664:6;6660:17;6656:27;6646:122;;6687:79;;:::i;:::-;6646:122;6800:6;6787:20;6777:30;;6830:18;6822:6;6819:30;6816:117;;;6852:79;;:::i;:::-;6816:117;6966:4;6958:6;6954:17;6942:29;;7020:3;7012:4;7004:6;7000:17;6990:8;6986:32;6983:41;6980:128;;;7027:79;;:::i;:::-;6980:128;6562:552;;;;;:::o;7120:672::-;7199:6;7207;7215;7264:2;7252:9;7243:7;7239:23;7235:32;7232:119;;;7270:79;;:::i;:::-;7232:119;7390:1;7415:53;7460:7;7451:6;7440:9;7436:22;7415:53;:::i;:::-;7405:63;;7361:117;7545:2;7534:9;7530:18;7517:32;7576:18;7568:6;7565:30;7562:117;;;7598:79;;:::i;:::-;7562:117;7711:64;7767:7;7758:6;7747:9;7743:22;7711:64;:::i;:::-;7693:82;;;;7488:297;7120:672;;;;;:::o;7798:118::-;7885:24;7903:5;7885:24;:::i;:::-;7880:3;7873:37;7798:118;;:::o;7922:222::-;8015:4;8053:2;8042:9;8038:18;8030:26;;8066:71;8134:1;8123:9;8119:17;8110:6;8066:71;:::i;:::-;7922:222;;;;:::o;8187:596::-;8288:8;8298:6;8348:3;8341:4;8333:6;8329:17;8325:27;8315:122;;8356:79;;:::i;:::-;8315:122;8469:6;8456:20;8446:30;;8499:18;8491:6;8488:30;8485:117;;;8521:79;;:::i;:::-;8485:117;8635:4;8627:6;8623:17;8611:29;;8689:3;8681:4;8673:6;8669:17;8659:8;8655:32;8652:41;8649:128;;;8696:79;;:::i;:::-;8649:128;8187:596;;;;;:::o;8789:1103::-;8932:6;8940;8948;8956;8964;9013:2;9001:9;8992:7;8988:23;8984:32;8981:119;;;9019:79;;:::i;:::-;8981:119;9139:1;9164:53;9209:7;9200:6;9189:9;9185:22;9164:53;:::i;:::-;9154:63;;9110:117;9294:2;9283:9;9279:18;9266:32;9325:18;9317:6;9314:30;9311:117;;;9347:79;;:::i;:::-;9311:117;9460:108;9560:7;9551:6;9540:9;9536:22;9460:108;:::i;:::-;9442:126;;;;9237:341;9645:2;9634:9;9630:18;9617:32;9676:18;9668:6;9665:30;9662:117;;;9698:79;;:::i;:::-;9662:117;9811:64;9867:7;9858:6;9847:9;9843:22;9811:64;:::i;:::-;9793:82;;;;9588:297;8789:1103;;;;;;;;:::o;9898:77::-;9935:7;9964:5;9953:16;;9898:77;;;:::o;9981:118::-;10068:24;10086:5;10068:24;:::i;:::-;10063:3;10056:37;9981:118;;:::o;10105:222::-;10198:4;10236:2;10225:9;10221:18;10213:26;;10249:71;10317:1;10306:9;10302:17;10293:6;10249:71;:::i;:::-;10105:222;;;;:::o;10333:329::-;10392:6;10441:2;10429:9;10420:7;10416:23;10412:32;10409:119;;;10447:79;;:::i;:::-;10409:119;10567:1;10592:53;10637:7;10628:6;10617:9;10613:22;10592:53;:::i;:::-;10582:63;;10538:117;10333:329;;;;:::o;10668:1047::-;10783:6;10791;10799;10807;10815;10864:2;10852:9;10843:7;10839:23;10835:32;10832:119;;;10870:79;;:::i;:::-;10832:119;10990:1;11015:53;11060:7;11051:6;11040:9;11036:22;11015:53;:::i;:::-;11005:63;;10961:117;11145:2;11134:9;11130:18;11117:32;11176:18;11168:6;11165:30;11162:117;;;11198:79;;:::i;:::-;11162:117;11311:80;11383:7;11374:6;11363:9;11359:22;11311:80;:::i;:::-;11293:98;;;;11088:313;11468:2;11457:9;11453:18;11440:32;11499:18;11491:6;11488:30;11485:117;;;11521:79;;:::i;:::-;11485:117;11634:64;11690:7;11681:6;11670:9;11666:22;11634:64;:::i;:::-;11616:82;;;;11411:297;10668:1047;;;;;;;;:::o;11721:474::-;11789:6;11797;11846:2;11834:9;11825:7;11821:23;11817:32;11814:119;;;11852:79;;:::i;:::-;11814:119;11972:1;11997:53;12042:7;12033:6;12022:9;12018:22;11997:53;:::i;:::-;11987:63;;11943:117;12099:2;12125:53;12170:7;12161:6;12150:9;12146:22;12125:53;:::i;:::-;12115:63;;12070:118;11721:474;;;;;:::o;12201:98::-;12252:6;12286:5;12280:12;12270:22;;12201:98;;;:::o;12305:158::-;12378:11;12412:6;12407:3;12400:19;12452:4;12447:3;12443:14;12428:29;;12305:158;;;;:::o;12469:246::-;12550:1;12560:113;12574:6;12571:1;12568:13;12560:113;;;12659:1;12654:3;12650:11;12644:18;12640:1;12635:3;12631:11;12624:39;12596:2;12593:1;12589:10;12584:15;;12560:113;;;12707:1;12698:6;12693:3;12689:16;12682:27;12531:184;12469:246;;;:::o;12721:102::-;12762:6;12813:2;12809:7;12804:2;12797:5;12793:14;12789:28;12779:38;;12721:102;;;:::o;12829:353::-;12905:3;12933:38;12965:5;12933:38;:::i;:::-;12987:60;13040:6;13035:3;12987:60;:::i;:::-;12980:67;;13056:65;13114:6;13109:3;13102:4;13095:5;13091:16;13056:65;:::i;:::-;13146:29;13168:6;13146:29;:::i;:::-;13141:3;13137:39;13130:46;;12909:273;12829:353;;;;:::o;13230:422::-;13341:3;13377:4;13372:3;13368:14;13464:4;13457:5;13453:16;13447:23;13517:3;13511:4;13507:14;13500:4;13495:3;13491:14;13484:38;13543:71;13609:4;13595:12;13543:71;:::i;:::-;13535:79;;13392:233;13642:4;13635:11;;13346:306;13230:422;;;;:::o;13658:357::-;13793:4;13831:2;13820:9;13816:18;13808:26;;13880:9;13874:4;13870:20;13866:1;13855:9;13851:17;13844:47;13908:100;14003:4;13994:6;13908:100;:::i;:::-;13900:108;;13658:357;;;;:::o;14021:817::-;14109:6;14117;14125;14133;14182:2;14170:9;14161:7;14157:23;14153:32;14150:119;;;14188:79;;:::i;:::-;14150:119;14308:1;14333:53;14378:7;14369:6;14358:9;14354:22;14333:53;:::i;:::-;14323:63;;14279:117;14435:2;14461:53;14506:7;14497:6;14486:9;14482:22;14461:53;:::i;:::-;14451:63;;14406:118;14591:2;14580:9;14576:18;14563:32;14622:18;14614:6;14611:30;14608:117;;;14644:79;;:::i;:::-;14608:117;14757:64;14813:7;14804:6;14793:9;14789:22;14757:64;:::i;:::-;14739:82;;;;14534:297;14021:817;;;;;;;:::o;14844:332::-;14965:4;15003:2;14992:9;14988:18;14980:26;;15016:71;15084:1;15073:9;15069:17;15060:6;15016:71;:::i;:::-;15097:72;15165:2;15154:9;15150:18;15141:6;15097:72;:::i;:::-;14844:332;;;;;:::o;15182:90::-;15216:7;15259:5;15252:13;15245:21;15234:32;;15182:90;;;:::o;15278:109::-;15359:21;15374:5;15359:21;:::i;:::-;15354:3;15347:34;15278:109;;:::o;15393:210::-;15480:4;15518:2;15507:9;15503:18;15495:26;;15531:65;15593:1;15582:9;15578:17;15569:6;15531:65;:::i;:::-;15393:210;;;;:::o;15609:117::-;15718:1;15715;15708:12;15732:180;15780:77;15777:1;15770:88;15877:4;15874:1;15867:15;15901:4;15898:1;15891:15;15918:281;16001:27;16023:4;16001:27;:::i;:::-;15993:6;15989:40;16131:6;16119:10;16116:22;16095:18;16083:10;16080:34;16077:62;16074:88;;;16142:18;;:::i;:::-;16074:88;16182:10;16178:2;16171:22;15961:238;15918:281;;:::o;16205:129::-;16239:6;16266:20;;:::i;:::-;16256:30;;16295:33;16323:4;16315:6;16295:33;:::i;:::-;16205:129;;;:::o;16340:307::-;16401:4;16491:18;16483:6;16480:30;16477:56;;;16513:18;;:::i;:::-;16477:56;16551:29;16573:6;16551:29;:::i;:::-;16543:37;;16635:4;16629;16625:15;16617:23;;16340:307;;;:::o;16653:146::-;16750:6;16745:3;16740;16727:30;16791:1;16782:6;16777:3;16773:16;16766:27;16653:146;;;:::o;16805:423::-;16882:5;16907:65;16923:48;16964:6;16923:48;:::i;:::-;16907:65;:::i;:::-;16898:74;;16995:6;16988:5;16981:21;17033:4;17026:5;17022:16;17071:3;17062:6;17057:3;17053:16;17050:25;17047:112;;;17078:79;;:::i;:::-;17047:112;17168:54;17215:6;17210:3;17205;17168:54;:::i;:::-;16888:340;16805:423;;;;;:::o;17247:338::-;17302:5;17351:3;17344:4;17336:6;17332:17;17328:27;17318:122;;17359:79;;:::i;:::-;17318:122;17476:6;17463:20;17501:78;17575:3;17567:6;17560:4;17552:6;17548:17;17501:78;:::i;:::-;17492:87;;17308:277;17247:338;;;;:::o;17591:652::-;17668:6;17676;17725:2;17713:9;17704:7;17700:23;17696:32;17693:119;;;17731:79;;:::i;:::-;17693:119;17851:1;17876:53;17921:7;17912:6;17901:9;17897:22;17876:53;:::i;:::-;17866:63;;17822:117;18006:2;17995:9;17991:18;17978:32;18037:18;18029:6;18026:30;18023:117;;;18059:79;;:::i;:::-;18023:117;18164:62;18218:7;18209:6;18198:9;18194:22;18164:62;:::i;:::-;18154:72;;17949:287;17591:652;;;;;:::o;18249:101::-;18285:7;18325:18;18318:5;18314:30;18303:41;;18249:101;;;:::o;18356:115::-;18441:23;18458:5;18441:23;:::i;:::-;18436:3;18429:36;18356:115;;:::o;18477:90::-;18512:7;18555:5;18552:1;18541:20;18530:31;;18477:90;;;:::o;18573:112::-;18656:22;18672:5;18656:22;:::i;:::-;18651:3;18644:35;18573:112;;:::o;18691:320::-;18806:4;18844:2;18833:9;18829:18;18821:26;;18857:69;18923:1;18912:9;18908:17;18899:6;18857:69;:::i;:::-;18936:68;19000:2;18989:9;18985:18;18976:6;18936:68;:::i;:::-;18691:320;;;;;:::o;19017:60::-;19045:3;19066:5;19059:12;;19017:60;;;:::o;19083:142::-;19133:9;19166:53;19184:34;19193:24;19211:5;19193:24;:::i;:::-;19184:34;:::i;:::-;19166:53;:::i;:::-;19153:66;;19083:142;;;:::o;19231:126::-;19281:9;19314:37;19345:5;19314:37;:::i;:::-;19301:50;;19231:126;;;:::o;19363:140::-;19427:9;19460:37;19491:5;19460:37;:::i;:::-;19447:50;;19363:140;;;:::o;19509:159::-;19610:51;19655:5;19610:51;:::i;:::-;19605:3;19598:64;19509:159;;:::o;19674:250::-;19781:4;19819:2;19808:9;19804:18;19796:26;;19832:85;19914:1;19903:9;19899:17;19890:6;19832:85;:::i;:::-;19674:250;;;;:::o;19930:474::-;19998:6;20006;20055:2;20043:9;20034:7;20030:23;20026:32;20023:119;;;20061:79;;:::i;:::-;20023:119;20181:1;20206:53;20251:7;20242:6;20231:9;20227:22;20206:53;:::i;:::-;20196:63;;20152:117;20308:2;20334:53;20379:7;20370:6;20359:9;20355:22;20334:53;:::i;:::-;20324:63;;20279:118;19930:474;;;;;:::o;20410:114::-;20477:6;20511:5;20505:12;20495:22;;20410:114;;;:::o;20530:184::-;20629:11;20663:6;20658:3;20651:19;20703:4;20698:3;20694:14;20679:29;;20530:184;;;;:::o;20720:132::-;20787:4;20810:3;20802:11;;20840:4;20835:3;20831:14;20823:22;;20720:132;;;:::o;20858:179::-;20927:10;20948:46;20990:3;20982:6;20948:46;:::i;:::-;21026:4;21021:3;21017:14;21003:28;;20858:179;;;;:::o;21043:113::-;21113:4;21145;21140:3;21136:14;21128:22;;21043:113;;;:::o;21192:732::-;21311:3;21340:54;21388:5;21340:54;:::i;:::-;21410:86;21489:6;21484:3;21410:86;:::i;:::-;21403:93;;21520:56;21570:5;21520:56;:::i;:::-;21599:7;21630:1;21615:284;21640:6;21637:1;21634:13;21615:284;;;21716:6;21710:13;21743:63;21802:3;21787:13;21743:63;:::i;:::-;21736:70;;21829:60;21882:6;21829:60;:::i;:::-;21819:70;;21675:224;21662:1;21659;21655:9;21650:14;;21615:284;;;21619:14;21915:3;21908:10;;21316:608;;;21192:732;;;;:::o;21930:373::-;22073:4;22111:2;22100:9;22096:18;22088:26;;22160:9;22154:4;22150:20;22146:1;22135:9;22131:17;22124:47;22188:108;22291:4;22282:6;22188:108;:::i;:::-;22180:116;;21930:373;;;;:::o;22309:672::-;22388:6;22396;22404;22453:2;22441:9;22432:7;22428:23;22424:32;22421:119;;;22459:79;;:::i;:::-;22421:119;22579:1;22604:53;22649:7;22640:6;22629:9;22625:22;22604:53;:::i;:::-;22594:63;;22550:117;22734:2;22723:9;22719:18;22706:32;22765:18;22757:6;22754:30;22751:117;;;22787:79;;:::i;:::-;22751:117;22900:64;22956:7;22947:6;22936:9;22932:22;22900:64;:::i;:::-;22882:82;;;;22677:297;22309:672;;;;;:::o;22987:218::-;23078:4;23116:2;23105:9;23101:18;23093:26;;23129:69;23195:1;23184:9;23180:17;23171:6;23129:69;:::i;:::-;22987:218;;;;:::o;23211:99::-;23263:6;23297:5;23291:12;23281:22;;23211:99;;;:::o;23316:169::-;23400:11;23434:6;23429:3;23422:19;23474:4;23469:3;23465:14;23450:29;;23316:169;;;;:::o;23491:377::-;23579:3;23607:39;23640:5;23607:39;:::i;:::-;23662:71;23726:6;23721:3;23662:71;:::i;:::-;23655:78;;23742:65;23800:6;23795:3;23788:4;23781:5;23777:16;23742:65;:::i;:::-;23832:29;23854:6;23832:29;:::i;:::-;23827:3;23823:39;23816:46;;23583:285;23491:377;;;;:::o;23874:313::-;23987:4;24025:2;24014:9;24010:18;24002:26;;24074:9;24068:4;24064:20;24060:1;24049:9;24045:17;24038:47;24102:78;24175:4;24166:6;24102:78;:::i;:::-;24094:86;;23874:313;;;;:::o;24193:329::-;24252:6;24301:2;24289:9;24280:7;24276:23;24272:32;24269:119;;;24307:79;;:::i;:::-;24269:119;24427:1;24452:53;24497:7;24488:6;24477:9;24473:22;24452:53;:::i;:::-;24442:63;;24398:117;24193:329;;;;:::o;24562:593::-;24660:8;24670:6;24720:3;24713:4;24705:6;24701:17;24697:27;24687:122;;24728:79;;:::i;:::-;24687:122;24841:6;24828:20;24818:30;;24871:18;24863:6;24860:30;24857:117;;;24893:79;;:::i;:::-;24857:117;25007:4;24999:6;24995:17;24983:29;;25061:3;25053:4;25045:6;25041:17;25031:8;25027:32;25024:41;25021:128;;;25068:79;;:::i;:::-;25021:128;24562:593;;;;;:::o;25161:754::-;25281:6;25289;25297;25346:2;25334:9;25325:7;25321:23;25317:32;25314:119;;;25352:79;;:::i;:::-;25314:119;25472:1;25497:53;25542:7;25533:6;25522:9;25518:22;25497:53;:::i;:::-;25487:63;;25443:117;25627:2;25616:9;25612:18;25599:32;25658:18;25650:6;25647:30;25644:117;;;25680:79;;:::i;:::-;25644:117;25793:105;25890:7;25881:6;25870:9;25866:22;25793:105;:::i;:::-;25775:123;;;;25570:338;25161:754;;;;;:::o;25921:180::-;25969:77;25966:1;25959:88;26066:4;26063:1;26056:15;26090:4;26087:1;26080:15;26107:194;26147:4;26167:20;26185:1;26167:20;:::i;:::-;26162:25;;26201:20;26219:1;26201:20;:::i;:::-;26196:25;;26245:1;26242;26238:9;26230:17;;26269:1;26263:4;26260:11;26257:37;;;26274:18;;:::i;:::-;26257:37;26107:194;;;;:::o;26307:180::-;26355:77;26352:1;26345:88;26452:4;26449:1;26442:15;26476:4;26473:1;26466:15;26493:170;26633:22;26629:1;26621:6;26617:14;26610:46;26493:170;:::o;26669:366::-;26811:3;26832:67;26896:2;26891:3;26832:67;:::i;:::-;26825:74;;26908:93;26997:3;26908:93;:::i;:::-;27026:2;27021:3;27017:12;27010:19;;26669:366;;;:::o;27041:419::-;27207:4;27245:2;27234:9;27230:18;27222:26;;27294:9;27288:4;27284:20;27280:1;27269:9;27265:17;27258:47;27322:131;27448:4;27322:131;:::i;:::-;27314:139;;27041:419;;;:::o;27466:167::-;27606:19;27602:1;27594:6;27590:14;27583:43;27466:167;:::o;27639:366::-;27781:3;27802:67;27866:2;27861:3;27802:67;:::i;:::-;27795:74;;27878:93;27967:3;27878:93;:::i;:::-;27996:2;27991:3;27987:12;27980:19;;27639:366;;;:::o;28011:419::-;28177:4;28215:2;28204:9;28200:18;28192:26;;28264:9;28258:4;28254:20;28250:1;28239:9;28235:17;28228:47;28292:131;28418:4;28292:131;:::i;:::-;28284:139;;28011:419;;;:::o;28436:183::-;28474:3;28497:23;28514:5;28497:23;:::i;:::-;28488:32;;28542:18;28535:5;28532:29;28529:55;;28564:18;;:::i;:::-;28529:55;28611:1;28604:5;28600:13;28593:20;;28436:183;;;:::o;28625:168::-;28708:11;28742:6;28737:3;28730:19;28782:4;28777:3;28773:14;28758:29;;28625:168;;;;:::o;28821:314::-;28917:3;28938:70;29001:6;28996:3;28938:70;:::i;:::-;28931:77;;29018:56;29067:6;29062:3;29055:5;29018:56;:::i;:::-;29099:29;29121:6;29099:29;:::i;:::-;29094:3;29090:39;29083:46;;28821:314;;;;;:::o;29141:549::-;29318:4;29356:2;29345:9;29341:18;29333:26;;29369:71;29437:1;29426:9;29422:17;29413:6;29369:71;:::i;:::-;29450:72;29518:2;29507:9;29503:18;29494:6;29450:72;:::i;:::-;29569:9;29563:4;29559:20;29554:2;29543:9;29539:18;29532:48;29597:86;29678:4;29669:6;29661;29597:86;:::i;:::-;29589:94;;29141:549;;;;;;;:::o;29696:168::-;29836:20;29832:1;29824:6;29820:14;29813:44;29696:168;:::o;29870:366::-;30012:3;30033:67;30097:2;30092:3;30033:67;:::i;:::-;30026:74;;30109:93;30198:3;30109:93;:::i;:::-;30227:2;30222:3;30218:12;30211:19;;29870:366;;;:::o;30242:419::-;30408:4;30446:2;30435:9;30431:18;30423:26;;30495:9;30489:4;30485:20;30481:1;30470:9;30466:17;30459:47;30523:131;30649:4;30523:131;:::i;:::-;30515:139;;30242:419;;;:::o;30667:176::-;30807:28;30803:1;30795:6;30791:14;30784:52;30667:176;:::o;30849:366::-;30991:3;31012:67;31076:2;31071:3;31012:67;:::i;:::-;31005:74;;31088:93;31177:3;31088:93;:::i;:::-;31206:2;31201:3;31197:12;31190:19;;30849:366;;;:::o;31221:419::-;31387:4;31425:2;31414:9;31410:18;31402:26;;31474:9;31468:4;31464:20;31460:1;31449:9;31445:17;31438:47;31502:131;31628:4;31502:131;:::i;:::-;31494:139;;31221:419;;;:::o;31646:178::-;31786:30;31782:1;31774:6;31770:14;31763:54;31646:178;:::o;31830:366::-;31972:3;31993:67;32057:2;32052:3;31993:67;:::i;:::-;31986:74;;32069:93;32158:3;32069:93;:::i;:::-;32187:2;32182:3;32178:12;32171:19;;31830:366;;;:::o;32202:419::-;32368:4;32406:2;32395:9;32391:18;32383:26;;32455:9;32449:4;32445:20;32441:1;32430:9;32426:17;32419:47;32483:131;32609:4;32483:131;:::i;:::-;32475:139;;32202:419;;;:::o;32627:117::-;32736:1;32733;32726:12;32750:117;32859:1;32856;32849:12;32873:117;32982:1;32979;32972:12;32996:394;33090:4;33144:11;33131:25;33244:1;33238:4;33234:12;33223:8;33207:14;33203:29;33199:48;33179:18;33175:73;33165:168;;33252:79;;:::i;:::-;33165:168;33364:18;33354:8;33350:33;33342:41;;33095:295;32996:394;;;;:::o;33396:389::-;33485:4;33539:11;33526:25;33639:1;33633:4;33629:12;33618:8;33602:14;33598:29;33594:48;33574:18;33570:73;33560:168;;33647:79;;:::i;:::-;33560:168;33759:18;33749:8;33745:33;33737:41;;33490:295;33396:389;;;;:::o;33791:191::-;33831:3;33850:20;33868:1;33850:20;:::i;:::-;33845:25;;33884:20;33902:1;33884:20;:::i;:::-;33879:25;;33927:1;33924;33920:9;33913:16;;33948:3;33945:1;33942:10;33939:36;;;33955:18;;:::i;:::-;33939:36;33791:191;;;;:::o;33988:210::-;34113:11;34147:6;34142:3;34135:19;34187:4;34182:3;34178:14;34163:29;;33988:210;;;;:::o;34204:130::-;34301:4;34324:3;34316:11;;34204:130;;;:::o;34340:117::-;34449:1;34446;34439:12;34463:370;34539:5;34594:3;34581:17;34686:1;34680:4;34676:12;34665:8;34649:14;34645:29;34641:48;34621:18;34617:73;34607:168;;34694:79;;:::i;:::-;34607:168;34817:8;34797:18;34793:33;34784:42;;34545:288;34463:370;;;;:::o;34839:117::-;34948:1;34945;34938:12;34962:117;35071:1;35068;35061:12;35085:711;35149:5;35156:6;35212:3;35199:17;35304:1;35298:4;35294:12;35283:8;35267:14;35263:29;35259:48;35239:18;35235:73;35225:168;;35312:79;;:::i;:::-;35225:168;35435:8;35415:18;35411:33;35402:42;;35477:5;35464:19;35454:29;;35512:4;35505:5;35501:16;35492:25;;35540:18;35532:6;35529:30;35526:117;;;35562:79;;:::i;:::-;35526:117;35698:4;35690:6;35686:17;35670:14;35666:38;35659:5;35655:50;35652:137;;;35708:79;;:::i;:::-;35652:137;35163:633;35085:711;;;;;:::o;35824:294::-;35910:3;35931:60;35984:6;35979:3;35931:60;:::i;:::-;35924:67;;36001:56;36050:6;36045:3;36038:5;36001:56;:::i;:::-;36082:29;36104:6;36082:29;:::i;:::-;36077:3;36073:39;36066:46;;35824:294;;;;;:::o;36166:482::-;36269:3;36305:4;36300:3;36296:14;36389:61;36444:4;36437:5;36433:16;36426:5;36389:61;:::i;:::-;36497:3;36491:4;36487:14;36480:4;36475:3;36471:14;36464:38;36523:87;36605:4;36591:12;36577;36523:87;:::i;:::-;36515:95;;36320:301;;36638:4;36631:11;;36274:374;36166:482;;;;:::o;36654:122::-;36706:5;36731:39;36766:2;36761:3;36757:12;36752:3;36731:39;:::i;:::-;36722:48;;36654:122;;;;:::o;36848:704::-;36961:3;36997:4;36992:3;36988:14;37067:73;37134:4;37127:5;37123:16;37116:5;37067:73;:::i;:::-;37187:3;37181:4;37177:14;37170:4;37165:3;37161:14;37154:38;37213:97;37305:4;37291:12;37213:97;:::i;:::-;37205:105;;37012:309;37389:50;37433:4;37426:5;37422:16;37415:5;37389:50;:::i;:::-;37452:63;37509:4;37504:3;37500:14;37486:12;37452:63;:::i;:::-;37331:194;37542:4;37535:11;;36966:586;36848:704;;;;:::o;37558:264::-;37681:10;37716:100;37812:3;37804:6;37716:100;:::i;:::-;37702:114;;37558:264;;;;:::o;37828:375::-;37909:5;37964:3;37951:17;38056:1;38050:4;38046:12;38035:8;38019:14;38015:29;38011:48;37991:18;37987:73;37977:168;;38064:79;;:::i;:::-;37977:168;38187:8;38167:18;38163:33;38154:42;;37915:288;37828:375;;;;:::o;38209:143::-;38309:4;38341;38336:3;38332:14;38324:22;;38209:143;;;:::o;38428:1096::-;38611:3;38634:112;38739:6;38734:3;38634:112;:::i;:::-;38627:119;;38772:3;38817:4;38809:6;38805:17;38800:3;38796:27;38847:86;38927:5;38847:86;:::i;:::-;38956:7;38987:1;38972:507;38997:6;38994:1;38991:13;38972:507;;;39068:9;39062:4;39058:20;39053:3;39046:33;39113:70;39176:6;39167:7;39113:70;:::i;:::-;39204:118;39317:4;39302:13;39204:118;:::i;:::-;39196:126;;39345:90;39428:6;39345:90;:::i;:::-;39335:100;;39464:4;39459:3;39455:14;39448:21;;39032:447;39019:1;39016;39012:9;39007:14;;38972:507;;;38976:14;39495:4;39488:11;;39515:3;39508:10;;38616:908;;;;38428:1096;;;;;:::o;39530:939::-;39849:4;39887:3;39876:9;39872:19;39864:27;;39901:71;39969:1;39958:9;39954:17;39945:6;39901:71;:::i;:::-;39982:72;40050:2;40039:9;40035:18;40026:6;39982:72;:::i;:::-;40101:9;40095:4;40091:20;40086:2;40075:9;40071:18;40064:48;40129:172;40296:4;40287:6;40279;40129:172;:::i;:::-;40121:180;;40348:9;40342:4;40338:20;40333:2;40322:9;40318:18;40311:48;40376:86;40457:4;40448:6;40440;40376:86;:::i;:::-;40368:94;;39530:939;;;;;;;;;:::o;40475:231::-;40615:34;40611:1;40603:6;40599:14;40592:58;40684:14;40679:2;40671:6;40667:15;40660:39;40475:231;:::o;40712:366::-;40854:3;40875:67;40939:2;40934:3;40875:67;:::i;:::-;40868:74;;40951:93;41040:3;40951:93;:::i;:::-;41069:2;41064:3;41060:12;41053:19;;40712:366;;;:::o;41084:419::-;41250:4;41288:2;41277:9;41273:18;41265:26;;41337:9;41331:4;41327:20;41323:1;41312:9;41308:17;41301:47;41365:131;41491:4;41365:131;:::i;:::-;41357:139;;41084:419;;;:::o;41509:245::-;41649:34;41645:1;41637:6;41633:14;41626:58;41718:28;41713:2;41705:6;41701:15;41694:53;41509:245;:::o;41760:366::-;41902:3;41923:67;41987:2;41982:3;41923:67;:::i;:::-;41916:74;;41999:93;42088:3;41999:93;:::i;:::-;42117:2;42112:3;42108:12;42101:19;;41760:366;;;:::o;42132:419::-;42298:4;42336:2;42325:9;42321:18;42313:26;;42385:9;42379:4;42375:20;42371:1;42360:9;42356:17;42349:47;42413:131;42539:4;42413:131;:::i;:::-;42405:139;;42132:419;;;:::o;42557:230::-;42697:34;42693:1;42685:6;42681:14;42674:58;42766:13;42761:2;42753:6;42749:15;42742:38;42557:230;:::o;42793:366::-;42935:3;42956:67;43020:2;43015:3;42956:67;:::i;:::-;42949:74;;43032:93;43121:3;43032:93;:::i;:::-;43150:2;43145:3;43141:12;43134:19;;42793:366;;;:::o;43165:419::-;43331:4;43369:2;43358:9;43354:18;43346:26;;43418:9;43412:4;43408:20;43404:1;43393:9;43389:17;43382:47;43446:131;43572:4;43446:131;:::i;:::-;43438:139;;43165:419;;;:::o;43590:117::-;43699:1;43696;43689:12;43713:98;43797:6;43792:3;43787;43774:30;43713:98;;;:::o;43847:537::-;43975:3;43996:86;44075:6;44070:3;43996:86;:::i;:::-;43989:93;;44106:66;44098:6;44095:78;44092:165;;;44176:79;;:::i;:::-;44092:165;44288:4;44280:6;44276:17;44266:27;;44303:43;44339:6;44334:3;44327:5;44303:43;:::i;:::-;44371:6;44366:3;44362:16;44355:23;;43847:537;;;;;:::o;44390:720::-;44627:4;44665:2;44654:9;44650:18;44642:26;;44678:71;44746:1;44735:9;44731:17;44722:6;44678:71;:::i;:::-;44796:9;44790:4;44786:20;44781:2;44770:9;44766:18;44759:48;44824:118;44937:4;44928:6;44920;44824:118;:::i;:::-;44816:126;;44989:9;44983:4;44979:20;44974:2;44963:9;44959:18;44952:48;45017:86;45098:4;45089:6;45081;45017:86;:::i;:::-;45009:94;;44390:720;;;;;;;;:::o;45116:180::-;45164:77;45161:1;45154:88;45261:4;45258:1;45251:15;45285:4;45282:1;45275:15;45302:320;45346:6;45383:1;45377:4;45373:12;45363:22;;45430:1;45424:4;45420:12;45451:18;45441:81;;45507:4;45499:6;45495:17;45485:27;;45441:81;45569:2;45561:6;45558:14;45538:18;45535:38;45532:84;;45588:18;;:::i;:::-;45532:84;45353:269;45302:320;;;:::o;45628:79::-;45667:7;45696:5;45685:16;;45628:79;;;:::o;45713:157::-;45818:45;45838:24;45856:5;45838:24;:::i;:::-;45818:45;:::i;:::-;45813:3;45806:58;45713:157;;:::o;45876:256::-;45988:3;46003:75;46074:3;46065:6;46003:75;:::i;:::-;46103:2;46098:3;46094:12;46087:19;;46123:3;46116:10;;45876:256;;;;:::o;46138:147::-;46239:11;46276:3;46261:18;;46138:147;;;;:::o;46291:386::-;46395:3;46423:38;46455:5;46423:38;:::i;:::-;46477:88;46558:6;46553:3;46477:88;:::i;:::-;46470:95;;46574:65;46632:6;46627:3;46620:4;46613:5;46609:16;46574:65;:::i;:::-;46664:6;46659:3;46655:16;46648:23;;46399:278;46291:386;;;;:::o;46683:271::-;46813:3;46835:93;46924:3;46915:6;46835:93;:::i;:::-;46828:100;;46945:3;46938:10;;46683:271;;;;:::o;46960:220::-;47100:34;47096:1;47088:6;47084:14;47077:58;47169:3;47164:2;47156:6;47152:15;47145:28;46960:220;:::o;47186:366::-;47328:3;47349:67;47413:2;47408:3;47349:67;:::i;:::-;47342:74;;47425:93;47514:3;47425:93;:::i;:::-;47543:2;47538:3;47534:12;47527:19;;47186:366;;;:::o;47558:419::-;47724:4;47762:2;47751:9;47747:18;47739:26;;47811:9;47805:4;47801:20;47797:1;47786:9;47782:17;47775:47;47839:131;47965:4;47839:131;:::i;:::-;47831:139;;47558:419;;;:::o;47983:143::-;48040:5;48071:6;48065:13;48056:22;;48087:33;48114:5;48087:33;:::i;:::-;47983:143;;;;:::o;48132:351::-;48202:6;48251:2;48239:9;48230:7;48226:23;48222:32;48219:119;;;48257:79;;:::i;:::-;48219:119;48377:1;48402:64;48458:7;48449:6;48438:9;48434:22;48402:64;:::i;:::-;48392:74;;48348:128;48132:351;;;;:::o;48489:233::-;48629:34;48625:1;48617:6;48613:14;48606:58;48698:16;48693:2;48685:6;48681:15;48674:41;48489:233;:::o;48728:366::-;48870:3;48891:67;48955:2;48950:3;48891:67;:::i;:::-;48884:74;;48967:93;49056:3;48967:93;:::i;:::-;49085:2;49080:3;49076:12;49069:19;;48728:366;;;:::o;49100:419::-;49266:4;49304:2;49293:9;49289:18;49281:26;;49353:9;49347:4;49343:20;49339:1;49328:9;49324:17;49317:47;49381:131;49507:4;49381:131;:::i;:::-;49373:139;;49100:419;;;:::o;49525:231::-;49665:34;49661:1;49653:6;49649:14;49642:58;49734:14;49729:2;49721:6;49717:15;49710:39;49525:231;:::o;49762:366::-;49904:3;49925:67;49989:2;49984:3;49925:67;:::i;:::-;49918:74;;50001:93;50090:3;50001:93;:::i;:::-;50119:2;50114:3;50110:12;50103:19;;49762:366;;;:::o;50134:419::-;50300:4;50338:2;50327:9;50323:18;50315:26;;50387:9;50381:4;50377:20;50373:1;50362:9;50358:17;50351:47;50415:131;50541:4;50415:131;:::i;:::-;50407:139;;50134:419;;;:::o;50559:180::-;50607:77;50604:1;50597:88;50704:4;50701:1;50694:15;50728:4;50725:1;50718:15;50745:295;50885:34;50881:1;50873:6;50869:14;50862:58;50954:34;50949:2;50941:6;50937:15;50930:59;51023:9;51018:2;51010:6;51006:15;50999:34;50745:295;:::o;51046:366::-;51188:3;51209:67;51273:2;51268:3;51209:67;:::i;:::-;51202:74;;51285:93;51374:3;51285:93;:::i;:::-;51403:2;51398:3;51394:12;51387:19;;51046:366;;;:::o;51418:419::-;51584:4;51622:2;51611:9;51607:18;51599:26;;51671:9;51665:4;51661:20;51657:1;51646:9;51642:17;51635:47;51699:131;51825:4;51699:131;:::i;:::-;51691:139;;51418:419;;;:::o;51843:660::-;52048:4;52086:3;52075:9;52071:19;52063:27;;52100:71;52168:1;52157:9;52153:17;52144:6;52100:71;:::i;:::-;52181:72;52249:2;52238:9;52234:18;52225:6;52181:72;:::i;:::-;52263;52331:2;52320:9;52316:18;52307:6;52263:72;:::i;:::-;52382:9;52376:4;52372:20;52367:2;52356:9;52352:18;52345:48;52410:86;52491:4;52482:6;52474;52410:86;:::i;:::-;52402:94;;51843:660;;;;;;;;:::o;52509:332::-;52630:4;52668:2;52657:9;52653:18;52645:26;;52681:71;52749:1;52738:9;52734:17;52725:6;52681:71;:::i;:::-;52762:72;52830:2;52819:9;52815:18;52806:6;52762:72;:::i;:::-;52509:332;;;;;:::o;52847:410::-;52887:7;52910:20;52928:1;52910:20;:::i;:::-;52905:25;;52944:20;52962:1;52944:20;:::i;:::-;52939:25;;52999:1;52996;52992:9;53021:30;53039:11;53021:30;:::i;:::-;53010:41;;53200:1;53191:7;53187:15;53184:1;53181:22;53161:1;53154:9;53134:83;53111:139;;53230:18;;:::i;:::-;53111:139;52895:362;52847:410;;;;:::o;53263:332::-;53384:4;53422:2;53411:9;53407:18;53399:26;;53435:71;53503:1;53492:9;53488:17;53479:6;53435:71;:::i;:::-;53516:72;53584:2;53573:9;53569:18;53560:6;53516:72;:::i;:::-;53263:332;;;;;:::o;53601:117::-;53710:1;53707;53700:12;53847:90;53882:7;53925:5;53922:1;53911:20;53900:31;;53847:90;;;:::o;53943:118::-;54014:22;54030:5;54014:22;:::i;:::-;54007:5;54004:33;53994:61;;54051:1;54048;54041:12;53994:61;53943:118;:::o;54067:139::-;54122:5;54153:6;54147:13;54138:22;;54169:31;54194:5;54169:31;:::i;:::-;54067:139;;;;:::o;54212:120::-;54284:23;54301:5;54284:23;:::i;:::-;54277:5;54274:34;54264:62;;54322:1;54319;54312:12;54264:62;54212:120;:::o;54338:141::-;54394:5;54425:6;54419:13;54410:22;;54441:32;54467:5;54441:32;:::i;:::-;54338:141;;;;:::o;54485:118::-;54556:22;54572:5;54556:22;:::i;:::-;54549:5;54546:33;54536:61;;54593:1;54590;54583:12;54536:61;54485:118;:::o;54609:139::-;54664:5;54695:6;54689:13;54680:22;;54711:31;54736:5;54711:31;:::i;:::-;54609:139;;;;:::o;54786:952::-;54869:5;54913:4;54901:9;54896:3;54892:19;54888:30;54885:117;;;54921:79;;:::i;:::-;54885:117;55020:21;55036:4;55020:21;:::i;:::-;55011:30;;55101:1;55141:58;55195:3;55186:6;55175:9;55171:22;55141:58;:::i;:::-;55134:4;55127:5;55123:16;55116:84;55051:160;55270:2;55311:59;55366:3;55357:6;55346:9;55342:22;55311:59;:::i;:::-;55304:4;55297:5;55293:16;55286:85;55221:161;55441:2;55482:58;55536:3;55527:6;55516:9;55512:22;55482:58;:::i;:::-;55475:4;55468:5;55464:16;55457:84;55392:160;55618:2;55659:60;55715:3;55706:6;55695:9;55691:22;55659:60;:::i;:::-;55652:4;55645:5;55641:16;55634:86;55562:169;54786:952;;;;:::o;55744:398::-;55837:6;55886:3;55874:9;55865:7;55861:23;55857:33;55854:120;;;55893:79;;:::i;:::-;55854:120;56013:1;56038:87;56117:7;56108:6;56097:9;56093:22;56038:87;:::i;:::-;56028:97;;55984:151;55744:398;;;;:::o;56148:235::-;56288:34;56284:1;56276:6;56272:14;56265:58;56357:18;56352:2;56344:6;56340:15;56333:43;56148:235;:::o;56389:366::-;56531:3;56552:67;56616:2;56611:3;56552:67;:::i;:::-;56545:74;;56628:93;56717:3;56628:93;:::i;:::-;56746:2;56741:3;56737:12;56730:19;;56389:366;;;:::o;56761:419::-;56927:4;56965:2;56954:9;56950:18;56942:26;;57014:9;57008:4;57004:20;57000:1;56989:9;56985:17;56978:47;57042:131;57168:4;57042:131;:::i;:::-;57034:139;;56761:419;;;:::o;57186:233::-;57326:34;57322:1;57314:6;57310:14;57303:58;57395:16;57390:2;57382:6;57378:15;57371:41;57186:233;:::o;57425:366::-;57567:3;57588:67;57652:2;57647:3;57588:67;:::i;:::-;57581:74;;57664:93;57753:3;57664:93;:::i;:::-;57782:2;57777:3;57773:12;57766:19;;57425:366;;;:::o;57797:419::-;57963:4;58001:2;57990:9;57986:18;57978:26;;58050:9;58044:4;58040:20;58036:1;58025:9;58021:17;58014:47;58078:131;58204:4;58078:131;:::i;:::-;58070:139;;57797:419;;;:::o;58222:251::-;58362:34;58358:1;58350:6;58346:14;58339:58;58431:34;58426:2;58418:6;58414:15;58407:59;58222:251;:::o;58479:366::-;58621:3;58642:67;58706:2;58701:3;58642:67;:::i;:::-;58635:74;;58718:93;58807:3;58718:93;:::i;:::-;58836:2;58831:3;58827:12;58820:19;;58479:366;;;:::o;58851:419::-;59017:4;59055:2;59044:9;59040:18;59032:26;;59104:9;59098:4;59094:20;59090:1;59079:9;59075:17;59068:47;59132:131;59258:4;59132:131;:::i;:::-;59124:139;;58851:419;;;:::o;59276:176::-;59416:28;59412:1;59404:6;59400:14;59393:52;59276:176;:::o;59458:366::-;59600:3;59621:67;59685:2;59680:3;59621:67;:::i;:::-;59614:74;;59697:93;59786:3;59697:93;:::i;:::-;59815:2;59810:3;59806:12;59799:19;;59458:366;;;:::o;59830:419::-;59996:4;60034:2;60023:9;60019:18;60011:26;;60083:9;60077:4;60073:20;60069:1;60058:9;60054:17;60047:47;60111:131;60237:4;60111:131;:::i;:::-;60103:139;;59830:419;;;:::o;60255:223::-;60395:34;60391:1;60383:6;60379:14;60372:58;60464:6;60459:2;60451:6;60447:15;60440:31;60255:223;:::o;60484:366::-;60626:3;60647:67;60711:2;60706:3;60647:67;:::i;:::-;60640:74;;60723:93;60812:3;60723:93;:::i;:::-;60841:2;60836:3;60832:12;60825:19;;60484:366;;;:::o;60856:419::-;61022:4;61060:2;61049:9;61045:18;61037:26;;61109:9;61103:4;61099:20;61095:1;61084:9;61080:17;61073:47;61137:131;61263:4;61137:131;:::i;:::-;61129:139;;60856:419;;;:::o;61281:549::-;61458:4;61496:2;61485:9;61481:18;61473:26;;61509:71;61577:1;61566:9;61562:17;61553:6;61509:71;:::i;:::-;61590:72;61658:2;61647:9;61643:18;61634:6;61590:72;:::i;:::-;61709:9;61703:4;61699:20;61694:2;61683:9;61679:18;61672:48;61737:86;61818:4;61809:6;61801;61737:86;:::i;:::-;61729:94;;61281:549;;;;;;;:::o;61836:230::-;61976:34;61972:1;61964:6;61960:14;61953:58;62045:13;62040:2;62032:6;62028:15;62021:38;61836:230;:::o;62072:366::-;62214:3;62235:67;62299:2;62294:3;62235:67;:::i;:::-;62228:74;;62311:93;62400:3;62311:93;:::i;:::-;62429:2;62424:3;62420:12;62413:19;;62072:366;;;:::o;62444:419::-;62610:4;62648:2;62637:9;62633:18;62625:26;;62697:9;62691:4;62687:20;62683:1;62672:9;62668:17;62661:47;62725:131;62851:4;62725:131;:::i;:::-;62717:139;;62444:419;;;:::o;62869:165::-;63009:17;63005:1;62997:6;62993:14;62986:41;62869:165;:::o;63040:366::-;63182:3;63203:67;63267:2;63262:3;63203:67;:::i;:::-;63196:74;;63279:93;63368:3;63279:93;:::i;:::-;63397:2;63392:3;63388:12;63381:19;;63040:366;;;:::o;63412:419::-;63578:4;63616:2;63605:9;63601:18;63593:26;;63665:9;63659:4;63655:20;63651:1;63640:9;63636:17;63629:47;63693:131;63819:4;63693:131;:::i;:::-;63685:139;;63412:419;;;:::o;63837:161::-;63977:13;63973:1;63965:6;63961:14;63954:37;63837:161;:::o;64004:366::-;64146:3;64167:67;64231:2;64226:3;64167:67;:::i;:::-;64160:74;;64243:93;64332:3;64243:93;:::i;:::-;64361:2;64356:3;64352:12;64345:19;;64004:366;;;:::o;64376:419::-;64542:4;64580:2;64569:9;64565:18;64557:26;;64629:9;64623:4;64619:20;64615:1;64604:9;64600:17;64593:47;64657:131;64783:4;64657:131;:::i;:::-;64649:139;;64376:419;;;:::o;64801:172::-;64941:24;64937:1;64929:6;64925:14;64918:48;64801:172;:::o;64979:366::-;65121:3;65142:67;65206:2;65201:3;65142:67;:::i;:::-;65135:74;;65218:93;65307:3;65218:93;:::i;:::-;65336:2;65331:3;65327:12;65320:19;;64979:366;;;:::o;65351:419::-;65517:4;65555:2;65544:9;65540:18;65532:26;;65604:9;65598:4;65594:20;65590:1;65579:9;65575:17;65568:47;65632:131;65758:4;65632:131;:::i;:::-;65624:139;;65351:419;;;:::o;65776:96::-;65810:8;65859:5;65854:3;65850:15;65829:36;;65776:96;;;:::o;65878:94::-;65916:7;65945:21;65960:5;65945:21;:::i;:::-;65934:32;;65878:94;;;:::o;65978:153::-;66081:43;66100:23;66117:5;66100:23;:::i;:::-;66081:43;:::i;:::-;66076:3;66069:56;65978:153;;:::o;66137:533::-;66303:3;66318:75;66389:3;66380:6;66318:75;:::i;:::-;66418:2;66413:3;66409:12;66402:19;;66431:75;66502:3;66493:6;66431:75;:::i;:::-;66531:2;66526:3;66522:12;66515:19;;66544:73;66613:3;66604:6;66544:73;:::i;:::-;66642:1;66637:3;66633:11;66626:18;;66661:3;66654:10;;66137:533;;;;;;:::o;66676:180::-;66724:77;66721:1;66714:88;66821:4;66818:1;66811:15;66845:4;66842:1;66835:15;66862:176;66894:1;66911:20;66929:1;66911:20;:::i;:::-;66906:25;;66945:20;66963:1;66945:20;:::i;:::-;66940:25;;66984:1;66974:35;;66989:18;;:::i;:::-;66974:35;67030:1;67027;67023:9;67018:14;;66862:176;;;;:::o;67044:391::-;67135:4;67189:11;67176:25;67289:1;67283:4;67279:12;67268:8;67252:14;67248:29;67244:48;67224:18;67220:73;67210:168;;67297:79;;:::i;:::-;67210:168;67409:18;67399:8;67395:33;67387:41;;67140:295;67044:391;;;;:::o;67441:740::-;67534:4;67540:6;67596:11;67583:25;67696:1;67690:4;67686:12;67675:8;67659:14;67655:29;67651:48;67631:18;67627:73;67617:168;;67704:79;;:::i;:::-;67617:168;67816:18;67806:8;67802:33;67794:41;;67868:4;67855:18;67845:28;;67896:18;67888:6;67885:30;67882:117;;;67918:79;;:::i;:::-;67882:117;68026:2;68020:4;68016:13;68008:21;;68083:4;68075:6;68071:17;68055:14;68051:38;68045:4;68041:49;68038:136;;;68093:79;;:::i;:::-;68038:136;67547:634;67441:740;;;;;:::o;68187:170::-;68327:22;68323:1;68315:6;68311:14;68304:46;68187:170;:::o;68363:366::-;68505:3;68526:67;68590:2;68585:3;68526:67;:::i;:::-;68519:74;;68602:93;68691:3;68602:93;:::i;:::-;68720:2;68715:3;68711:12;68704:19;;68363:366;;;:::o;68735:419::-;68901:4;68939:2;68928:9;68924:18;68916:26;;68988:9;68982:4;68978:20;68974:1;68963:9;68959:17;68952:47;69016:131;69142:4;69016:131;:::i;:::-;69008:139;;68735:419;;;:::o;69160:553::-;69337:4;69375:3;69364:9;69360:19;69352:27;;69389:71;69457:1;69446:9;69442:17;69433:6;69389:71;:::i;:::-;69470:72;69538:2;69527:9;69523:18;69514:6;69470:72;:::i;:::-;69552;69620:2;69609:9;69605:18;69596:6;69552:72;:::i;:::-;69634;69702:2;69691:9;69687:18;69678:6;69634:72;:::i;:::-;69160:553;;;;;;;:::o;69719:85::-;69764:7;69793:5;69782:16;;69719:85;;;:::o;69810:156::-;69867:9;69900:60;69917:42;69926:32;69952:5;69926:32;:::i;:::-;69917:42;:::i;:::-;69900:60;:::i;:::-;69887:73;;69810:156;;;:::o;69972:145::-;70066:44;70104:5;70066:44;:::i;:::-;70061:3;70054:57;69972:145;;:::o;70123:236::-;70223:4;70261:2;70250:9;70246:18;70238:26;;70274:78;70349:1;70338:9;70334:17;70325:6;70274:78;:::i;:::-;70123:236;;;;:::o;70365:174::-;70505:26;70501:1;70493:6;70489:14;70482:50;70365:174;:::o;70545:366::-;70687:3;70708:67;70772:2;70767:3;70708:67;:::i;:::-;70701:74;;70784:93;70873:3;70784:93;:::i;:::-;70902:2;70897:3;70893:12;70886:19;;70545:366;;;:::o;70917:419::-;71083:4;71121:2;71110:9;71106:18;71098:26;;71170:9;71164:4;71160:20;71156:1;71145:9;71141:17;71134:47;71198:131;71324:4;71198:131;:::i;:::-;71190:139;;70917:419;;;:::o;71342:171::-;71381:3;71404:24;71422:5;71404:24;:::i;:::-;71395:33;;71450:4;71443:5;71440:15;71437:41;;71458:18;;:::i;:::-;71437:41;71505:1;71498:5;71494:13;71487:20;;71342:171;;;:::o;71519:185::-;71559:1;71576:20;71594:1;71576:20;:::i;:::-;71571:25;;71610:20;71628:1;71610:20;:::i;:::-;71605:25;;71649:1;71639:35;;71654:18;;:::i;:::-;71639:35;71696:1;71693;71689:9;71684:14;;71519:185;;;;:::o;71710:170::-;71850:22;71846:1;71838:6;71834:14;71827:46;71710:170;:::o;71886:366::-;72028:3;72049:67;72113:2;72108:3;72049:67;:::i;:::-;72042:74;;72125:93;72214:3;72125:93;:::i;:::-;72243:2;72238:3;72234:12;72227:19;;71886:366;;;:::o;72258:419::-;72424:4;72462:2;72451:9;72447:18;72439:26;;72511:9;72505:4;72501:20;72497:1;72486:9;72482:17;72475:47;72539:131;72665:4;72539:131;:::i;:::-;72531:139;;72258:419;;;:::o;72683:114::-;;:::o;72803:398::-;72962:3;72983:83;73064:1;73059:3;72983:83;:::i;:::-;72976:90;;73075:93;73164:3;73075:93;:::i;:::-;73193:1;73188:3;73184:11;73177:18;;72803:398;;;:::o;73207:379::-;73391:3;73413:147;73556:3;73413:147;:::i;:::-;73406:154;;73577:3;73570:10;;73207:379;;;:::o;73592:161::-;73732:13;73728:1;73720:6;73716:14;73709:37;73592:161;:::o;73759:366::-;73901:3;73922:67;73986:2;73981:3;73922:67;:::i;:::-;73915:74;;73998:93;74087:3;73998:93;:::i;:::-;74116:2;74111:3;74107:12;74100:19;;73759:366;;;:::o;74131:419::-;74297:4;74335:2;74324:9;74320:18;74312:26;;74384:9;74378:4;74374:20;74370:1;74359:9;74355:17;74348:47;74412:131;74538:4;74412:131;:::i;:::-;74404:139;;74131:419;;;:::o;74556:179::-;74696:31;74692:1;74684:6;74680:14;74673:55;74556:179;:::o;74741:366::-;74883:3;74904:67;74968:2;74963:3;74904:67;:::i;:::-;74897:74;;74980:93;75069:3;74980:93;:::i;:::-;75098:2;75093:3;75089:12;75082:19;;74741:366;;;:::o;75113:529::-;75307:4;75345:2;75334:9;75330:18;75322:26;;75358:71;75426:1;75415:9;75411:17;75402:6;75358:71;:::i;:::-;75476:9;75470:4;75466:20;75461:2;75450:9;75446:18;75439:48;75504:131;75630:4;75504:131;:::i;:::-;75496:139;;75113:529;;;;:::o;75648:177::-;75788:29;75784:1;75776:6;75772:14;75765:53;75648:177;:::o;75831:366::-;75973:3;75994:67;76058:2;76053:3;75994:67;:::i;:::-;75987:74;;76070:93;76159:3;76070:93;:::i;:::-;76188:2;76183:3;76179:12;76172:19;;75831:366;;;:::o;76203:529::-;76397:4;76435:2;76424:9;76420:18;76412:26;;76448:71;76516:1;76505:9;76501:17;76492:6;76448:71;:::i;:::-;76566:9;76560:4;76556:20;76551:2;76540:9;76536:18;76529:48;76594:131;76720:4;76594:131;:::i;:::-;76586:139;;76203:529;;;;:::o;76738:182::-;76878:34;76874:1;76866:6;76862:14;76855:58;76738:182;:::o;76926:366::-;77068:3;77089:67;77153:2;77148:3;77089:67;:::i;:::-;77082:74;;77165:93;77254:3;77165:93;:::i;:::-;77283:2;77278:3;77274:12;77267:19;;76926:366;;;:::o;77298:529::-;77492:4;77530:2;77519:9;77515:18;77507:26;;77543:71;77611:1;77600:9;77596:17;77587:6;77543:71;:::i;:::-;77661:9;77655:4;77651:20;77646:2;77635:9;77631:18;77624:48;77689:131;77815:4;77689:131;:::i;:::-;77681:139;;77298:529;;;;:::o;77833:233::-;77872:3;77895:24;77913:5;77895:24;:::i;:::-;77886:33;;77941:66;77934:5;77931:77;77928:103;;78011:18;;:::i;:::-;77928:103;78058:1;78051:5;78047:13;78040:20;;77833:233;;;:::o;78258:724::-;78335:4;78341:6;78397:11;78384:25;78497:1;78491:4;78487:12;78476:8;78460:14;78456:29;78452:48;78432:18;78428:73;78418:168;;78505:79;;:::i;:::-;78418:168;78617:18;78607:8;78603:33;78595:41;;78669:4;78656:18;78646:28;;78697:18;78689:6;78686:30;78683:117;;;78719:79;;:::i;:::-;78683:117;78827:2;78821:4;78817:13;78809:21;;78884:4;78876:6;78872:17;78856:14;78852:38;78846:4;78842:49;78839:136;;;78894:79;;:::i;:::-;78839:136;78348:634;78258:724;;;;;:::o;78988:96::-;79046:6;79074:3;79064:13;;78988:96;;;;:::o;79090:140::-;79138:4;79161:3;79153:11;;79184:3;79181:1;79174:14;79218:4;79215:1;79205:18;79197:26;;79090:140;;;:::o;79236:93::-;79273:6;79320:2;79315;79308:5;79304:14;79300:23;79290:33;;79236:93;;;:::o;79335:107::-;79379:8;79429:5;79423:4;79419:16;79398:37;;79335:107;;;;:::o;79448:393::-;79517:6;79567:1;79555:10;79551:18;79590:97;79620:66;79609:9;79590:97;:::i;:::-;79708:39;79738:8;79727:9;79708:39;:::i;:::-;79696:51;;79780:4;79776:9;79769:5;79765:21;79756:30;;79829:4;79819:8;79815:19;79808:5;79805:30;79795:40;;79524:317;;79448:393;;;;;:::o;79847:142::-;79897:9;79930:53;79948:34;79957:24;79975:5;79957:24;:::i;:::-;79948:34;:::i;:::-;79930:53;:::i;:::-;79917:66;;79847:142;;;:::o;79995:75::-;80038:3;80059:5;80052:12;;79995:75;;;:::o;80076:269::-;80186:39;80217:7;80186:39;:::i;:::-;80247:91;80296:41;80320:16;80296:41;:::i;:::-;80288:6;80281:4;80275:11;80247:91;:::i;:::-;80241:4;80234:105;80152:193;80076:269;;;:::o;80351:73::-;80396:3;80351:73;:::o;80430:189::-;80507:32;;:::i;:::-;80548:65;80606:6;80598;80592:4;80548:65;:::i;:::-;80483:136;80430:189;;:::o;80625:186::-;80685:120;80702:3;80695:5;80692:14;80685:120;;;80756:39;80793:1;80786:5;80756:39;:::i;:::-;80729:1;80722:5;80718:13;80709:22;;80685:120;;;80625:186;;:::o;80817:541::-;80917:2;80912:3;80909:11;80906:445;;;80951:37;80982:5;80951:37;:::i;:::-;81034:29;81052:10;81034:29;:::i;:::-;81024:8;81020:44;81217:2;81205:10;81202:18;81199:49;;;81238:8;81223:23;;81199:49;81261:80;81317:22;81335:3;81317:22;:::i;:::-;81307:8;81303:37;81290:11;81261:80;:::i;:::-;80921:430;;80906:445;80817:541;;;:::o;81364:117::-;81418:8;81468:5;81462:4;81458:16;81437:37;;81364:117;;;;:::o;81487:169::-;81531:6;81564:51;81612:1;81608:6;81600:5;81597:1;81593:13;81564:51;:::i;:::-;81560:56;81645:4;81639;81635:15;81625:25;;81538:118;81487:169;;;;:::o;81661:295::-;81737:4;81883:29;81908:3;81902:4;81883:29;:::i;:::-;81875:37;;81945:3;81942:1;81938:11;81932:4;81929:21;81921:29;;81661:295;;;;:::o;81961:1398::-;82083:43;82122:3;82117;82083:43;:::i;:::-;82191:18;82183:6;82180:30;82177:56;;;82213:18;;:::i;:::-;82177:56;82257:38;82289:4;82283:11;82257:38;:::i;:::-;82342:66;82401:6;82393;82387:4;82342:66;:::i;:::-;82435:1;82464:2;82456:6;82453:14;82481:1;82476:631;;;;83151:1;83168:6;83165:84;;;83224:9;83219:3;83215:19;83202:33;83193:42;;83165:84;83275:67;83335:6;83328:5;83275:67;:::i;:::-;83269:4;83262:81;83124:229;82446:907;;82476:631;82528:4;82524:9;82516:6;82512:22;82562:36;82593:4;82562:36;:::i;:::-;82620:1;82634:215;82648:7;82645:1;82642:14;82634:215;;;82734:9;82729:3;82725:19;82712:33;82704:6;82697:49;82785:1;82777:6;82773:14;82763:24;;82832:2;82821:9;82817:18;82804:31;;82671:4;82668:1;82664:12;82659:17;;82634:215;;;82877:6;82868:7;82865:19;82862:186;;;82942:9;82937:3;82933:19;82920:33;82985:48;83027:4;83019:6;83015:17;83004:9;82985:48;:::i;:::-;82977:6;82970:64;82885:163;82862:186;83094:1;83090;83082:6;83078:14;83074:22;83068:4;83061:36;82483:624;;;82446:907;;82058:1301;;;81961:1398;;;:::o;83365:214::-;83478:95;83565:7;83556;83550:4;83478:95;:::i;:::-;83365:214;;;:::o;83585:483::-;83753:1;83747:4;83743:12;83799:1;83792:5;83788:13;83864:62;83913:12;83906:5;83864:62;:::i;:::-;83940:110;84036:13;84021;84009:10;83940:110;:::i;:::-;83710:351;;;;83585:483;;:::o;84074:240::-;84202:106;84300:7;84294:4;84202:106;:::i;:::-;84074:240;;:::o;84320:247::-;84460:34;84456:1;84448:6;84444:14;84437:58;84529:30;84524:2;84516:6;84512:15;84505:55;84320:247;:::o;84573:366::-;84715:3;84736:67;84800:2;84795:3;84736:67;:::i;:::-;84729:74;;84812:93;84901:3;84812:93;:::i;:::-;84930:2;84925:3;84921:12;84914:19;;84573:366;;;:::o;84945:419::-;85111:4;85149:2;85138:9;85134:18;85126:26;;85198:9;85192:4;85188:20;85184:1;85173:9;85169:17;85162:47;85226:131;85352:4;85226:131;:::i;:::-;85218:139;;84945:419;;;:::o;85370:243::-;85510:34;85506:1;85498:6;85494:14;85487:58;85579:26;85574:2;85566:6;85562:15;85555:51;85370:243;:::o;85619:366::-;85761:3;85782:67;85846:2;85841:3;85782:67;:::i;:::-;85775:74;;85858:93;85947:3;85858:93;:::i;:::-;85976:2;85971:3;85967:12;85960:19;;85619:366;;;:::o;85991:419::-;86157:4;86195:2;86184:9;86180:18;86172:26;;86244:9;86238:4;86234:20;86230:1;86219:9;86215:17;86208:47;86272:131;86398:4;86272:131;:::i;:::-;86264:139;;85991:419;;;:::o;86416:238::-;86556:34;86552:1;86544:6;86540:14;86533:58;86625:21;86620:2;86612:6;86608:15;86601:46;86416:238;:::o;86660:366::-;86802:3;86823:67;86887:2;86882:3;86823:67;:::i;:::-;86816:74;;86899:93;86988:3;86899:93;:::i;:::-;87017:2;87012:3;87008:12;87001:19;;86660:366;;;:::o;87032:419::-;87198:4;87236:2;87225:9;87221:18;87213:26;;87285:9;87279:4;87275:20;87271:1;87260:9;87256:17;87249:47;87313:131;87439:4;87313:131;:::i;:::-;87305:139;;87032:419;;;:::o;87457:102::-;87499:8;87546:5;87543:1;87539:13;87518:34;;87457:102;;;:::o;87565:848::-;87626:5;87633:4;87657:6;87648:15;;87681:5;87672:14;;87695:712;87716:1;87706:8;87703:15;87695:712;;;87811:4;87806:3;87802:14;87796:4;87793:24;87790:50;;;87820:18;;:::i;:::-;87790:50;87870:1;87860:8;87856:16;87853:451;;;88285:4;88278:5;88274:16;88265:25;;87853:451;88335:4;88329;88325:15;88317:23;;88365:32;88388:8;88365:32;:::i;:::-;88353:44;;87695:712;;;87565:848;;;;;;;:::o;88419:1073::-;88473:5;88664:8;88654:40;;88685:1;88676:10;;88687:5;;88654:40;88713:4;88703:36;;88730:1;88721:10;;88732:5;;88703:36;88799:4;88847:1;88842:27;;;;88883:1;88878:191;;;;88792:277;;88842:27;88860:1;88851:10;;88862:5;;;88878:191;88923:3;88913:8;88910:17;88907:43;;;88930:18;;:::i;:::-;88907:43;88979:8;88976:1;88972:16;88963:25;;89014:3;89007:5;89004:14;89001:40;;;89021:18;;:::i;:::-;89001:40;89054:5;;;88792:277;;89178:2;89168:8;89165:16;89159:3;89153:4;89150:13;89146:36;89128:2;89118:8;89115:16;89110:2;89104:4;89101:12;89097:35;89081:111;89078:246;;;89234:8;89228:4;89224:19;89215:28;;89269:3;89262:5;89259:14;89256:40;;;89276:18;;:::i;:::-;89256:40;89309:5;;89078:246;89349:42;89387:3;89377:8;89371:4;89368:1;89349:42;:::i;:::-;89334:57;;;;89423:4;89418:3;89414:14;89407:5;89404:25;89401:51;;;89432:18;;:::i;:::-;89401:51;89481:4;89474:5;89470:16;89461:25;;88419:1073;;;;;;:::o;89498:93::-;89534:7;89574:10;89567:5;89563:22;89552:33;;89498:93;;;:::o;89597:283::-;89656:5;89680:23;89698:4;89680:23;:::i;:::-;89672:31;;89724:26;89741:8;89724:26;:::i;:::-;89712:38;;89769:104;89806:66;89796:8;89790:4;89769:104;:::i;:::-;89760:113;;89597:283;;;;:::o;89886:226::-;89920:3;89943:22;89959:5;89943:22;:::i;:::-;89934:31;;89987:66;89980:5;89977:77;89974:103;;90057:18;;:::i;:::-;89974:103;90100:5;90097:1;90093:13;90086:20;;89886:226;;;:::o;90118:122::-;90191:24;90209:5;90191:24;:::i;:::-;90184:5;90181:35;90171:63;;90230:1;90227;90220:12;90171:63;90118:122;:::o;90246:143::-;90303:5;90334:6;90328:13;90319:22;;90350:33;90377:5;90350:33;:::i;:::-;90246:143;;;;:::o;90395:351::-;90465:6;90514:2;90502:9;90493:7;90489:23;90485:32;90482:119;;;90520:79;;:::i;:::-;90482:119;90640:1;90665:64;90721:7;90712:6;90701:9;90697:22;90665:64;:::i;:::-;90655:74;;90611:128;90395:351;;;;:::o;90752:171::-;90892:23;90888:1;90880:6;90876:14;90869:47;90752:171;:::o;90929:366::-;91071:3;91092:67;91156:2;91151:3;91092:67;:::i;:::-;91085:74;;91168:93;91257:3;91168:93;:::i;:::-;91286:2;91281:3;91277:12;91270:19;;90929:366;;;:::o;91301:419::-;91467:4;91505:2;91494:9;91490:18;91482:26;;91554:9;91548:4;91544:20;91540:1;91529:9;91525:17;91518:47;91582:131;91708:4;91582:131;:::i;:::-;91574:139;;91301:419;;;:::o;91726:116::-;91777:4;91800:3;91792:11;;91830:4;91825:3;91821:14;91813:22;;91726:116;;;:::o;91848:154::-;91891:11;91927:29;91951:3;91945:10;91927:29;:::i;:::-;91990:5;91966:29;;91903:99;91848:154;;;:::o;92008:594::-;92092:5;92123:38;92155:5;92123:38;:::i;:::-;92186:5;92213:40;92247:5;92213:40;:::i;:::-;92201:52;;92272:35;92298:8;92272:35;:::i;:::-;92263:44;;92331:2;92323:6;92320:14;92317:278;;;92402:169;92487:66;92457:6;92453:2;92449:15;92446:1;92442:23;92402:169;:::i;:::-;92379:5;92358:227;92349:236;;92317:278;92098:504;;92008:594;;;:::o;92608:430::-;92751:4;92789:2;92778:9;92774:18;92766:26;;92802:71;92870:1;92859:9;92855:17;92846:6;92802:71;:::i;:::-;92883:70;92949:2;92938:9;92934:18;92925:6;92883:70;:::i;:::-;92963:68;93027:2;93016:9;93012:18;93003:6;92963:68;:::i;:::-;92608:430;;;;;;:::o;93044:221::-;93184:34;93180:1;93172:6;93168:14;93161:58;93253:4;93248:2;93240:6;93236:15;93229:29;93044:221;:::o;93271:366::-;93413:3;93434:67;93498:2;93493:3;93434:67;:::i;:::-;93427:74;;93510:93;93599:3;93510:93;:::i;:::-;93628:2;93623:3;93619:12;93612:19;;93271:366;;;:::o;93643:419::-;93809:4;93847:2;93836:9;93832:18;93824:26;;93896:9;93890:4;93886:20;93882:1;93871:9;93867:17;93860:47;93924:131;94050:4;93924:131;:::i;:::-;93916:139;;93643:419;;;:::o;94068:76::-;94104:7;94133:5;94122:16;;94068:76;;;:::o;94150:228::-;94185:3;94208:23;94225:5;94208:23;:::i;:::-;94199:32;;94253:66;94246:5;94243:77;94240:103;;94323:18;;:::i;:::-;94240:103;94366:5;94363:1;94359:13;94352:20;;94150:228;;;:::o","linkReferences":{},"immutableReferences":{"468":[{"start":15849,"length":32},{"start":15934,"length":32},{"start":16376,"length":32}]}},"methodIdentifiers":{"BURN_ACTOR()":"0a6a63f1","EXTRA_DATA_MAX_SIZE()":"029b4646","FIL_USD_PRICE_FEED_ID()":"19c75950","LEAF_SIZE()":"c0e15949","MAX_ENQUEUED_REMOVALS()":"9f8cb3bd","MAX_ROOT_SIZE()":"16e2bcd5","NO_CHALLENGE_SCHEDULED()":"462dd449","NO_PROVEN_EPOCH()":"f178b1be","PYTH()":"67e406d5","RANDOMNESS_PRECOMPILE()":"15b17570","SECONDS_IN_DAY()":"61a52a36","UPGRADE_INTERFACE_VERSION()":"ad3cb1cc","addRoots(uint256,((bytes),uint256)[],bytes)":"11c0ee4a","calculateProofFee(uint256,uint256)":"4903704a","claimProofSetOwnership(uint256)":"ee3dac65","createProofSet(address,bytes)":"0a4d7932","deleteProofSet(uint256,bytes)":"847d1d06","findRootIds(uint256,uint256[])":"0528a55b","getChallengeFinality()":"f83758fe","getChallengeRange(uint256)":"89208ba9","getFILUSDPrice()":"4fa27920","getNextChallengeEpoch(uint256)":"6ba4608f","getNextProofSetId()":"8ea417e5","getNextRootId(uint256)":"d49245c1","getProofSetLastProvenEpoch(uint256)":"faa67163","getProofSetLeafCount(uint256)":"3f84135f","getProofSetListener(uint256)":"31601226","getProofSetOwner(uint256)":"4726075b","getRandomness(uint256)":"453f4f62","getRootCid(uint256,uint256)":"3b7ae913","getRootLeafCount(uint256,uint256)":"9153e64b","getScheduledRemovals(uint256)":"6fa44692","initialize(uint256)":"fe4b84df","nextProvingPeriod(uint256,uint256,bytes)":"45c0b92d","owner()":"8da5cb5b","proofSetLive(uint256)":"f5cac1ba","proposeProofSetOwner(uint256,address)":"6cb55c16","provePossession(uint256,(bytes32,bytes32[])[])":"f58f952b","proxiableUUID()":"52d1902d","renounceOwnership()":"715018a6","rootChallengable(uint256,uint256)":"71cf2a16","rootLive(uint256,uint256)":"47331050","scheduleRemovals(uint256,uint256[],bytes)":"3b68e4e9","transferOwnership(address)":"f2fde38b","upgradeToAndCall(address,bytes)":"4f1ef286"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.23+commit.f704f362\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"}],\"name\":\"AddressEmptyCode\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"implementation\",\"type\":\"address\"}],\"name\":\"ERC1967InvalidImplementation\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ERC1967NonPayable\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FailedCall\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"idx\",\"type\":\"uint256\"},{\"internalType\":\"string\",\"name\":\"msg\",\"type\":\"string\"}],\"name\":\"IndexedError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidInitialization\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotInitializing\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"OwnableInvalidOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"OwnableUnauthorizedAccount\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UUPSUnauthorizedCallContext\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"slot\",\"type\":\"bytes32\"}],\"name\":\"UUPSUnsupportedProxiableUUID\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"string\",\"name\":\"message\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Debug\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"version\",\"type\":\"uint64\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"challengeEpoch\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"leafCount\",\"type\":\"uint256\"}],\"name\":\"NextProvingPeriod\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rootId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"offset\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"struct PDPVerifier.RootIdAndOffset[]\",\"name\":\"challenges\",\"type\":\"tuple[]\"}],\"name\":\"PossessionProven\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"price\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"int32\",\"name\":\"expo\",\"type\":\"int32\"}],\"name\":\"ProofFeePaid\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"ProofSetCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"deletedLeafCount\",\"type\":\"uint256\"}],\"name\":\"ProofSetDeleted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"ProofSetEmpty\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"ProofSetOwnerChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"rootIds\",\"type\":\"uint256[]\"}],\"name\":\"RootsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"rootIds\",\"type\":\"uint256[]\"}],\"name\":\"RootsRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"implementation\",\"type\":\"address\"}],\"name\":\"Upgraded\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"BURN_ACTOR\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"EXTRA_DATA_MAX_SIZE\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"FIL_USD_PRICE_FEED_ID\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"LEAF_SIZE\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_ENQUEUED_REMOVALS\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_ROOT_SIZE\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"NO_CHALLENGE_SCHEDULED\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"NO_PROVEN_EPOCH\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PYTH\",\"outputs\":[{\"internalType\":\"contract IPyth\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"RANDOMNESS_PRECOMPILE\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SECONDS_IN_DAY\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"UPGRADE_INTERFACE_VERSION\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"components\":[{\"components\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"struct Cids.Cid\",\"name\":\"root\",\"type\":\"tuple\"},{\"internalType\":\"uint256\",\"name\":\"rawSize\",\"type\":\"uint256\"}],\"internalType\":\"struct PDPVerifier.RootData[]\",\"name\":\"rootData\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"addRoots\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"estimatedGasFee\",\"type\":\"uint256\"}],\"name\":\"calculateProofFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"claimProofSetOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"listenerAddr\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"createProofSet\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"deleteProofSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"leafIndexs\",\"type\":\"uint256[]\"}],\"name\":\"findRootIds\",\"outputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rootId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"offset\",\"type\":\"uint256\"}],\"internalType\":\"struct PDPVerifier.RootIdAndOffset[]\",\"name\":\"\",\"type\":\"tuple[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getChallengeFinality\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getChallengeRange\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFILUSDPrice\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"},{\"internalType\":\"int32\",\"name\":\"\",\"type\":\"int32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getNextChallengeEpoch\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getNextProofSetId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getNextRootId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getProofSetLastProvenEpoch\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getProofSetLeafCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getProofSetListener\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getProofSetOwner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"epoch\",\"type\":\"uint256\"}],\"name\":\"getRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"rootId\",\"type\":\"uint256\"}],\"name\":\"getRootCid\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"struct Cids.Cid\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"rootId\",\"type\":\"uint256\"}],\"name\":\"getRootLeafCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getScheduledRemovals\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_challengeFinality\",\"type\":\"uint256\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"challengeEpoch\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"nextProvingPeriod\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"proofSetLive\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"proposeProofSetOwner\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"leaf\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32[]\",\"name\":\"proof\",\"type\":\"bytes32[]\"}],\"internalType\":\"struct PDPVerifier.Proof[]\",\"name\":\"proofs\",\"type\":\"tuple[]\"}],\"name\":\"provePossession\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"proxiableUUID\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"rootId\",\"type\":\"uint256\"}],\"name\":\"rootChallengable\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"rootId\",\"type\":\"uint256\"}],\"name\":\"rootLive\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"rootIds\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"scheduleRemovals\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newImplementation\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"upgradeToAndCall\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"}],\"devdoc\":{\"errors\":{\"AddressEmptyCode(address)\":[{\"details\":\"There's no code at `target` (it is not a contract).\"}],\"ERC1967InvalidImplementation(address)\":[{\"details\":\"The `implementation` of the proxy is invalid.\"}],\"ERC1967NonPayable()\":[{\"details\":\"An upgrade function sees `msg.value > 0` that may be lost.\"}],\"FailedCall()\":[{\"details\":\"A call to an address target failed. The target may have reverted.\"}],\"InvalidInitialization()\":[{\"details\":\"The contract is already initialized.\"}],\"NotInitializing()\":[{\"details\":\"The contract is not initializing.\"}],\"OwnableInvalidOwner(address)\":[{\"details\":\"The owner is not a valid owner account. (eg. `address(0)`)\"}],\"OwnableUnauthorizedAccount(address)\":[{\"details\":\"The caller account is not authorized to perform an operation.\"}],\"UUPSUnauthorizedCallContext()\":[{\"details\":\"The call is from an unauthorized context.\"}],\"UUPSUnsupportedProxiableUUID(bytes32)\":[{\"details\":\"The storage `slot` is unsupported as a UUID.\"}]},\"events\":{\"Initialized(uint64)\":{\"details\":\"Triggered when the contract has been initialized or reinitialized.\"},\"Upgraded(address)\":{\"details\":\"Emitted when the implementation is upgraded.\"}},\"kind\":\"dev\",\"methods\":{\"constructor\":{\"custom:oz-upgrades-unsafe-allow\":\"constructor\"},\"owner()\":{\"details\":\"Returns the address of the current owner.\"},\"proxiableUUID()\":{\"details\":\"Implementation of the ERC-1822 {proxiableUUID} function. This returns the storage slot used by the implementation. It is used to validate the implementation's compatibility when performing an upgrade. IMPORTANT: A proxy pointing at a proxiable contract should not be considered proxiable itself, because this risks bricking a proxy that upgrades to it, by delegating to itself until out of gas. Thus it is critical that this function revert if invoked through a proxy. This is guaranteed by the `notDelegated` modifier.\"},\"renounceOwnership()\":{\"details\":\"Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.\"},\"transferOwnership(address)\":{\"details\":\"Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner.\"},\"upgradeToAndCall(address,bytes)\":{\"custom:oz-upgrades-unsafe-allow-reachable\":\"delegatecall\",\"details\":\"Upgrade the implementation of the proxy to `newImplementation`, and subsequently execute the function call encoded in `data`. Calls {_authorizeUpgrade}. Emits an {Upgraded} event.\"}},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/PDPVerifier.sol\":\"PDPVerifier\"},\"evmVersion\":\"shanghai\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":false,\"runs\":200},\"remappings\":[\":@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/\",\":@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/\",\":@pythnetwork/pyth-sdk-solidity/=node_modules/@pythnetwork/pyth-sdk-solidity/\",\":erc4626-tests/=lib/openzeppelin-contracts-upgradeable/lib/erc4626-tests/\",\":forge-std/=lib/forge-std/src/\",\":halmos-cheatcodes/=lib/openzeppelin-contracts-upgradeable/lib/halmos-cheatcodes/src/\",\":openzeppelin-contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/\",\":openzeppelin-contracts/=lib/openzeppelin-contracts/\"]},\"sources\":{\"lib/openzeppelin-contracts-upgradeable/contracts/access/OwnableUpgradeable.sol\":{\"keccak256\":\"0xc163fcf9bb10138631a9ba5564df1fa25db9adff73bd9ee868a8ae1858fe093a\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://9706d43a0124053d9880f6e31a59f31bc0a6a3dc1acd66ce0a16e1111658c5f6\",\"dweb:/ipfs/QmUFmfowzkRwGtDu36cXV9SPTBHJ3n7dG9xQiK5B28jTf2\"]},\"lib/openzeppelin-contracts-upgradeable/contracts/proxy/utils/Initializable.sol\":{\"keccak256\":\"0x631188737069917d2f909d29ce62c4d48611d326686ba6683e26b72a23bfac0b\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://7a61054ae84cd6c4d04c0c4450ba1d6de41e27e0a2c4f1bcdf58f796b401c609\",\"dweb:/ipfs/QmUvtdp7X1mRVyC3CsHrtPbgoqWaXHp3S1ZR24tpAQYJWM\"]},\"lib/openzeppelin-contracts-upgradeable/contracts/proxy/utils/UUPSUpgradeable.sol\":{\"keccak256\":\"0x8816653b632f8f634b78885c35112232b44acbf6033ec9e5065d2dd94946b15a\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://6c16be456b19a1dbaaff7e89b9f6f5c92a02544d5d5f89222a9f57b5a8cfc2f0\",\"dweb:/ipfs/QmS4aeG6paPRwAM1puekhkyGR4mHuMUzFz3riVDv7fbvvB\"]},\"lib/openzeppelin-contracts-upgradeable/contracts/utils/ContextUpgradeable.sol\":{\"keccak256\":\"0xdbef5f0c787055227243a7318ef74c8a5a1108ca3a07f2b3a00ef67769e1e397\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://08e39f23d5b4692f9a40803e53a8156b72b4c1f9902a88cd65ba964db103dab9\",\"dweb:/ipfs/QmPKn6EYDgpga7KtpkA8wV2yJCYGMtc9K4LkJfhKX2RVSV\"]},\"lib/openzeppelin-contracts/contracts/interfaces/IERC1967.sol\":{\"keccak256\":\"0xb25a4f11fa80c702bf5cd85adec90e6f6f507f32f4a8e6f5dbc31e8c10029486\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://6917f8a323e7811f041aecd4d9fd6e92455a6fba38a797ac6f6e208c7912b79d\",\"dweb:/ipfs/QmShuYv55wYHGi4EFkDB8QfF7ZCHoKk2efyz3AWY1ExSq7\"]},\"lib/openzeppelin-contracts/contracts/interfaces/draft-IERC1822.sol\":{\"keccak256\":\"0xc42facb5094f2f35f066a7155bda23545e39a3156faef3ddc00185544443ba7d\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://d3b36282ab029b46bd082619a308a2ea11c309967b9425b7b7a6eb0b0c1c3196\",\"dweb:/ipfs/QmP2YVfDB2FoREax3vJu7QhDnyYRMw52WPrCD4vdT2kuDA\"]},\"lib/openzeppelin-contracts/contracts/proxy/ERC1967/ERC1967Utils.sol\":{\"keccak256\":\"0x02caa0e5f7bade9a0d8ad6058467d641cb67697cd4678c7b1c170686bafe9128\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://33b42a434f5d5fdc5071be05238059b9d8938bdab510071a5c300a975abc405a\",\"dweb:/ipfs/QmaThmoD3JMdHGhn4GUJbEGnKcojUG8PWMFoC7DFcQoeCw\"]},\"lib/openzeppelin-contracts/contracts/proxy/beacon/IBeacon.sol\":{\"keccak256\":\"0xc59a78b07b44b2cf2e8ab4175fca91e8eca1eee2df7357b8d2a8833e5ea1f64c\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://5aa4f07e65444784c29cd7bfcc2341b34381e4e5b5da9f0c5bd00d7f430e66fa\",\"dweb:/ipfs/QmWRMh4Q9DpaU9GvsiXmDdoNYMyyece9if7hnfLz7uqzWM\"]},\"lib/openzeppelin-contracts/contracts/utils/Address.sol\":{\"keccak256\":\"0x9d8da059267bac779a2dbbb9a26c2acf00ca83085e105d62d5d4ef96054a47f5\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://c78e2aa4313323cecd1ef12a8d6265b96beee1a199923abf55d9a2a9e291ad23\",\"dweb:/ipfs/QmUTs2KStXucZezzFo3EYeqYu47utu56qrF7jj1Gue65vb\"]},\"lib/openzeppelin-contracts/contracts/utils/Errors.sol\":{\"keccak256\":\"0x6afa713bfd42cf0f7656efa91201007ac465e42049d7de1d50753a373648c123\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://ba1d02f4847670a1b83dec9f7d37f0b0418d6043447b69f3a29a5f9efc547fcf\",\"dweb:/ipfs/QmQ7iH2keLNUKgq2xSWcRmuBE5eZ3F5whYAkAGzCNNoEWB\"]},\"lib/openzeppelin-contracts/contracts/utils/StorageSlot.sol\":{\"keccak256\":\"0xcf74f855663ce2ae00ed8352666b7935f6cddea2932fdf2c3ecd30a9b1cd0e97\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://9f660b1f351b757dfe01438e59888f31f33ded3afcf5cb5b0d9bf9aa6f320a8b\",\"dweb:/ipfs/QmarDJ5hZEgBtCmmrVzEZWjub9769eD686jmzb2XpSU1cM\"]},\"node_modules/@pythnetwork/pyth-sdk-solidity/IPyth.sol\":{\"keccak256\":\"0x217532ece69b8e472a6260b740c34aebfb5a299bbfed6392cf0458ed368be7ab\",\"license\":\"Apache-2.0\",\"urls\":[\"bzz-raw://02d1b71006ccdfd6402a2b72ea197babbd1b54c26a70ebb76a114f0ae8352f08\",\"dweb:/ipfs/QmbqfuvwriG3AEwYEwupUaQKgfxRYK6Qui99o6wQysPoP3\"]},\"node_modules/@pythnetwork/pyth-sdk-solidity/IPythEvents.sol\":{\"keccak256\":\"0x7ca8e03315d4516d6833c425a52c43e8cacf2077492074d2d36ae5c17899c9c8\",\"license\":\"Apache-2.0\",\"urls\":[\"bzz-raw://ad1c69d157eccb09ce248e1ec021f2e58b61dd36160f5be3973a7bea4a899f64\",\"dweb:/ipfs/QmW1yXsDrMsuQKxtZanSZXpyUW2QwnCKVoCjS5fC3NoSVY\"]},\"node_modules/@pythnetwork/pyth-sdk-solidity/PythStructs.sol\":{\"keccak256\":\"0xade221177dda98ebd194c363f264ceea125bde0e6a7a72f7b54da3ac60316894\",\"license\":\"Apache-2.0\",\"urls\":[\"bzz-raw://a404dbbc64183995326c345cae27601d37c783b3d9030c8dc0ab4943fa2bf1da\",\"dweb:/ipfs/QmfNFesQffYisafmJFbKHxVFSD8fY49X1z9f8N7qtfW8AX\"]},\"src/BitOps.sol\":{\"keccak256\":\"0x55fc8272df01302eba6fde6174e691ec86f791c39ac9b1c6a5e4ca1792439ca4\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://1e4de6ed5f6e6180261728a590eeb629de65db443f4f279801c03a1bc14201d7\",\"dweb:/ipfs/QmeCcCjy88QJwCkZoGbeZVjxksePwTcmhKevtA2F3kRXaT\"]},\"src/Cids.sol\":{\"keccak256\":\"0x4085c3a55cdf809251a469829bae218d03db4afd9455dab674a8a2ab3b7451dc\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://93406cf5db1b0fa908c306ab6c4d42f69990e9fd08c781de871724c525097803\",\"dweb:/ipfs/QmXkXwjhEo929M6qBXkHKBT3DowiVYcLEe5oUkFnjFJMy2\"]},\"src/Fees.sol\":{\"keccak256\":\"0x74945bddcdd334715c9fab53deba13867f17855976ae64c33abdc84dc439feb0\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://8c7ee756cc406b4192220c54c3e66908a32887c32519461e2dbae8eff144691c\",\"dweb:/ipfs/Qmdy23cwyFG2xbv1htf8FCGwFPfY98rfoKFJ9G6zcWiRkk\"]},\"src/PDPVerifier.sol\":{\"keccak256\":\"0x281e77ce7e1f0eef0d3db9be6116ed71cee92c3b8181dc3c47ff582878ca7fb7\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://63b491e882199f57ce1c5f06ce57b257e40d9c6d38eb62be61d9eba4c7103fe3\",\"dweb:/ipfs/QmQr7NGETKtAcbHJhQ6ZxwB5yoHAwukjVjZwQQGd836KRk\"]},\"src/Proofs.sol\":{\"keccak256\":\"0xf8d27dd91086ba2b4521f36227d92aae35c9f8dfcb117c775e2417166e15a737\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://8db46f122470a14d2c084655c6fada18c966accca05feae92923b1ace7a9f86b\",\"dweb:/ipfs/QmQWGxWUcpejzJt28gwbKfq5C3LLiB5HrHdXMja6HHYxbj\"]}},\"version\":1}","metadata":{"compiler":{"version":"0.8.23+commit.f704f362"},"language":"Solidity","output":{"abi":[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"address","name":"target","type":"address"}],"type":"error","name":"AddressEmptyCode"},{"inputs":[{"internalType":"address","name":"implementation","type":"address"}],"type":"error","name":"ERC1967InvalidImplementation"},{"inputs":[],"type":"error","name":"ERC1967NonPayable"},{"inputs":[],"type":"error","name":"FailedCall"},{"inputs":[{"internalType":"uint256","name":"idx","type":"uint256"},{"internalType":"string","name":"msg","type":"string"}],"type":"error","name":"IndexedError"},{"inputs":[],"type":"error","name":"InvalidInitialization"},{"inputs":[],"type":"error","name":"NotInitializing"},{"inputs":[{"internalType":"address","name":"owner","type":"address"}],"type":"error","name":"OwnableInvalidOwner"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"type":"error","name":"OwnableUnauthorizedAccount"},{"inputs":[],"type":"error","name":"UUPSUnauthorizedCallContext"},{"inputs":[{"internalType":"bytes32","name":"slot","type":"bytes32"}],"type":"error","name":"UUPSUnsupportedProxiableUUID"},{"inputs":[{"internalType":"string","name":"message","type":"string","indexed":false},{"internalType":"uint256","name":"value","type":"uint256","indexed":false}],"type":"event","name":"Debug","anonymous":false},{"inputs":[{"internalType":"uint64","name":"version","type":"uint64","indexed":false}],"type":"event","name":"Initialized","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"uint256","name":"challengeEpoch","type":"uint256","indexed":false},{"internalType":"uint256","name":"leafCount","type":"uint256","indexed":false}],"type":"event","name":"NextProvingPeriod","anonymous":false},{"inputs":[{"internalType":"address","name":"previousOwner","type":"address","indexed":true},{"internalType":"address","name":"newOwner","type":"address","indexed":true}],"type":"event","name":"OwnershipTransferred","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"struct PDPVerifier.RootIdAndOffset[]","name":"challenges","type":"tuple[]","components":[{"internalType":"uint256","name":"rootId","type":"uint256"},{"internalType":"uint256","name":"offset","type":"uint256"}],"indexed":false}],"type":"event","name":"PossessionProven","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"uint256","name":"fee","type":"uint256","indexed":false},{"internalType":"uint64","name":"price","type":"uint64","indexed":false},{"internalType":"int32","name":"expo","type":"int32","indexed":false}],"type":"event","name":"ProofFeePaid","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"address","name":"owner","type":"address","indexed":true}],"type":"event","name":"ProofSetCreated","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"uint256","name":"deletedLeafCount","type":"uint256","indexed":false}],"type":"event","name":"ProofSetDeleted","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true}],"type":"event","name":"ProofSetEmpty","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"address","name":"oldOwner","type":"address","indexed":true},{"internalType":"address","name":"newOwner","type":"address","indexed":true}],"type":"event","name":"ProofSetOwnerChanged","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"uint256[]","name":"rootIds","type":"uint256[]","indexed":false}],"type":"event","name":"RootsAdded","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"uint256[]","name":"rootIds","type":"uint256[]","indexed":false}],"type":"event","name":"RootsRemoved","anonymous":false},{"inputs":[{"internalType":"address","name":"implementation","type":"address","indexed":true}],"type":"event","name":"Upgraded","anonymous":false},{"inputs":[],"stateMutability":"view","type":"function","name":"BURN_ACTOR","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"EXTRA_DATA_MAX_SIZE","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"FIL_USD_PRICE_FEED_ID","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"LEAF_SIZE","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"MAX_ENQUEUED_REMOVALS","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"MAX_ROOT_SIZE","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"NO_CHALLENGE_SCHEDULED","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"NO_PROVEN_EPOCH","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"PYTH","outputs":[{"internalType":"contract IPyth","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"RANDOMNESS_PRECOMPILE","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"SECONDS_IN_DAY","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"UPGRADE_INTERFACE_VERSION","outputs":[{"internalType":"string","name":"","type":"string"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"struct PDPVerifier.RootData[]","name":"rootData","type":"tuple[]","components":[{"internalType":"struct Cids.Cid","name":"root","type":"tuple","components":[{"internalType":"bytes","name":"data","type":"bytes"}]},{"internalType":"uint256","name":"rawSize","type":"uint256"}]},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"addRoots","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"estimatedGasFee","type":"uint256"}],"stateMutability":"view","type":"function","name":"calculateProofFee","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"nonpayable","type":"function","name":"claimProofSetOwnership"},{"inputs":[{"internalType":"address","name":"listenerAddr","type":"address"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"payable","type":"function","name":"createProofSet","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"deleteProofSet"},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256[]","name":"leafIndexs","type":"uint256[]"}],"stateMutability":"view","type":"function","name":"findRootIds","outputs":[{"internalType":"struct PDPVerifier.RootIdAndOffset[]","name":"","type":"tuple[]","components":[{"internalType":"uint256","name":"rootId","type":"uint256"},{"internalType":"uint256","name":"offset","type":"uint256"}]}]},{"inputs":[],"stateMutability":"view","type":"function","name":"getChallengeFinality","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getChallengeRange","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"getFILUSDPrice","outputs":[{"internalType":"uint64","name":"","type":"uint64"},{"internalType":"int32","name":"","type":"int32"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getNextChallengeEpoch","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"getNextProofSetId","outputs":[{"internalType":"uint64","name":"","type":"uint64"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getNextRootId","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getProofSetLastProvenEpoch","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getProofSetLeafCount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getProofSetListener","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getProofSetOwner","outputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}]},{"inputs":[{"internalType":"uint256","name":"epoch","type":"uint256"}],"stateMutability":"view","type":"function","name":"getRandomness","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"rootId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getRootCid","outputs":[{"internalType":"struct Cids.Cid","name":"","type":"tuple","components":[{"internalType":"bytes","name":"data","type":"bytes"}]}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"rootId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getRootLeafCount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getScheduledRemovals","outputs":[{"internalType":"uint256[]","name":"","type":"uint256[]"}]},{"inputs":[{"internalType":"uint256","name":"_challengeFinality","type":"uint256"}],"stateMutability":"nonpayable","type":"function","name":"initialize"},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"challengeEpoch","type":"uint256"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"nextProvingPeriod"},{"inputs":[],"stateMutability":"view","type":"function","name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"proofSetLive","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"address","name":"newOwner","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"proposeProofSetOwner"},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"struct PDPVerifier.Proof[]","name":"proofs","type":"tuple[]","components":[{"internalType":"bytes32","name":"leaf","type":"bytes32"},{"internalType":"bytes32[]","name":"proof","type":"bytes32[]"}]}],"stateMutability":"payable","type":"function","name":"provePossession"},{"inputs":[],"stateMutability":"view","type":"function","name":"proxiableUUID","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"renounceOwnership"},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"rootId","type":"uint256"}],"stateMutability":"view","type":"function","name":"rootChallengable","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"rootId","type":"uint256"}],"stateMutability":"view","type":"function","name":"rootLive","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256[]","name":"rootIds","type":"uint256[]"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"scheduleRemovals"},{"inputs":[{"internalType":"address","name":"newOwner","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"transferOwnership"},{"inputs":[{"internalType":"address","name":"newImplementation","type":"address"},{"internalType":"bytes","name":"data","type":"bytes"}],"stateMutability":"payable","type":"function","name":"upgradeToAndCall"}],"devdoc":{"kind":"dev","methods":{"constructor":{"custom:oz-upgrades-unsafe-allow":"constructor"},"owner()":{"details":"Returns the address of the current owner."},"proxiableUUID()":{"details":"Implementation of the ERC-1822 {proxiableUUID} function. This returns the storage slot used by the implementation. It is used to validate the implementation's compatibility when performing an upgrade. IMPORTANT: A proxy pointing at a proxiable contract should not be considered proxiable itself, because this risks bricking a proxy that upgrades to it, by delegating to itself until out of gas. Thus it is critical that this function revert if invoked through a proxy. This is guaranteed by the `notDelegated` modifier."},"renounceOwnership()":{"details":"Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner."},"transferOwnership(address)":{"details":"Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner."},"upgradeToAndCall(address,bytes)":{"custom:oz-upgrades-unsafe-allow-reachable":"delegatecall","details":"Upgrade the implementation of the proxy to `newImplementation`, and subsequently execute the function call encoded in `data`. Calls {_authorizeUpgrade}. Emits an {Upgraded} event."}},"version":1},"userdoc":{"kind":"user","methods":{},"version":1}},"settings":{"remappings":["@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/","@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/","@pythnetwork/pyth-sdk-solidity/=node_modules/@pythnetwork/pyth-sdk-solidity/","erc4626-tests/=lib/openzeppelin-contracts-upgradeable/lib/erc4626-tests/","forge-std/=lib/forge-std/src/","halmos-cheatcodes/=lib/openzeppelin-contracts-upgradeable/lib/halmos-cheatcodes/src/","openzeppelin-contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/","openzeppelin-contracts/=lib/openzeppelin-contracts/"],"optimizer":{"enabled":false,"runs":200},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/PDPVerifier.sol":"PDPVerifier"},"evmVersion":"shanghai","libraries":{}},"sources":{"lib/openzeppelin-contracts-upgradeable/contracts/access/OwnableUpgradeable.sol":{"keccak256":"0xc163fcf9bb10138631a9ba5564df1fa25db9adff73bd9ee868a8ae1858fe093a","urls":["bzz-raw://9706d43a0124053d9880f6e31a59f31bc0a6a3dc1acd66ce0a16e1111658c5f6","dweb:/ipfs/QmUFmfowzkRwGtDu36cXV9SPTBHJ3n7dG9xQiK5B28jTf2"],"license":"MIT"},"lib/openzeppelin-contracts-upgradeable/contracts/proxy/utils/Initializable.sol":{"keccak256":"0x631188737069917d2f909d29ce62c4d48611d326686ba6683e26b72a23bfac0b","urls":["bzz-raw://7a61054ae84cd6c4d04c0c4450ba1d6de41e27e0a2c4f1bcdf58f796b401c609","dweb:/ipfs/QmUvtdp7X1mRVyC3CsHrtPbgoqWaXHp3S1ZR24tpAQYJWM"],"license":"MIT"},"lib/openzeppelin-contracts-upgradeable/contracts/proxy/utils/UUPSUpgradeable.sol":{"keccak256":"0x8816653b632f8f634b78885c35112232b44acbf6033ec9e5065d2dd94946b15a","urls":["bzz-raw://6c16be456b19a1dbaaff7e89b9f6f5c92a02544d5d5f89222a9f57b5a8cfc2f0","dweb:/ipfs/QmS4aeG6paPRwAM1puekhkyGR4mHuMUzFz3riVDv7fbvvB"],"license":"MIT"},"lib/openzeppelin-contracts-upgradeable/contracts/utils/ContextUpgradeable.sol":{"keccak256":"0xdbef5f0c787055227243a7318ef74c8a5a1108ca3a07f2b3a00ef67769e1e397","urls":["bzz-raw://08e39f23d5b4692f9a40803e53a8156b72b4c1f9902a88cd65ba964db103dab9","dweb:/ipfs/QmPKn6EYDgpga7KtpkA8wV2yJCYGMtc9K4LkJfhKX2RVSV"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/interfaces/IERC1967.sol":{"keccak256":"0xb25a4f11fa80c702bf5cd85adec90e6f6f507f32f4a8e6f5dbc31e8c10029486","urls":["bzz-raw://6917f8a323e7811f041aecd4d9fd6e92455a6fba38a797ac6f6e208c7912b79d","dweb:/ipfs/QmShuYv55wYHGi4EFkDB8QfF7ZCHoKk2efyz3AWY1ExSq7"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/interfaces/draft-IERC1822.sol":{"keccak256":"0xc42facb5094f2f35f066a7155bda23545e39a3156faef3ddc00185544443ba7d","urls":["bzz-raw://d3b36282ab029b46bd082619a308a2ea11c309967b9425b7b7a6eb0b0c1c3196","dweb:/ipfs/QmP2YVfDB2FoREax3vJu7QhDnyYRMw52WPrCD4vdT2kuDA"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/proxy/ERC1967/ERC1967Utils.sol":{"keccak256":"0x02caa0e5f7bade9a0d8ad6058467d641cb67697cd4678c7b1c170686bafe9128","urls":["bzz-raw://33b42a434f5d5fdc5071be05238059b9d8938bdab510071a5c300a975abc405a","dweb:/ipfs/QmaThmoD3JMdHGhn4GUJbEGnKcojUG8PWMFoC7DFcQoeCw"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/proxy/beacon/IBeacon.sol":{"keccak256":"0xc59a78b07b44b2cf2e8ab4175fca91e8eca1eee2df7357b8d2a8833e5ea1f64c","urls":["bzz-raw://5aa4f07e65444784c29cd7bfcc2341b34381e4e5b5da9f0c5bd00d7f430e66fa","dweb:/ipfs/QmWRMh4Q9DpaU9GvsiXmDdoNYMyyece9if7hnfLz7uqzWM"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/Address.sol":{"keccak256":"0x9d8da059267bac779a2dbbb9a26c2acf00ca83085e105d62d5d4ef96054a47f5","urls":["bzz-raw://c78e2aa4313323cecd1ef12a8d6265b96beee1a199923abf55d9a2a9e291ad23","dweb:/ipfs/QmUTs2KStXucZezzFo3EYeqYu47utu56qrF7jj1Gue65vb"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/Errors.sol":{"keccak256":"0x6afa713bfd42cf0f7656efa91201007ac465e42049d7de1d50753a373648c123","urls":["bzz-raw://ba1d02f4847670a1b83dec9f7d37f0b0418d6043447b69f3a29a5f9efc547fcf","dweb:/ipfs/QmQ7iH2keLNUKgq2xSWcRmuBE5eZ3F5whYAkAGzCNNoEWB"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/StorageSlot.sol":{"keccak256":"0xcf74f855663ce2ae00ed8352666b7935f6cddea2932fdf2c3ecd30a9b1cd0e97","urls":["bzz-raw://9f660b1f351b757dfe01438e59888f31f33ded3afcf5cb5b0d9bf9aa6f320a8b","dweb:/ipfs/QmarDJ5hZEgBtCmmrVzEZWjub9769eD686jmzb2XpSU1cM"],"license":"MIT"},"node_modules/@pythnetwork/pyth-sdk-solidity/IPyth.sol":{"keccak256":"0x217532ece69b8e472a6260b740c34aebfb5a299bbfed6392cf0458ed368be7ab","urls":["bzz-raw://02d1b71006ccdfd6402a2b72ea197babbd1b54c26a70ebb76a114f0ae8352f08","dweb:/ipfs/QmbqfuvwriG3AEwYEwupUaQKgfxRYK6Qui99o6wQysPoP3"],"license":"Apache-2.0"},"node_modules/@pythnetwork/pyth-sdk-solidity/IPythEvents.sol":{"keccak256":"0x7ca8e03315d4516d6833c425a52c43e8cacf2077492074d2d36ae5c17899c9c8","urls":["bzz-raw://ad1c69d157eccb09ce248e1ec021f2e58b61dd36160f5be3973a7bea4a899f64","dweb:/ipfs/QmW1yXsDrMsuQKxtZanSZXpyUW2QwnCKVoCjS5fC3NoSVY"],"license":"Apache-2.0"},"node_modules/@pythnetwork/pyth-sdk-solidity/PythStructs.sol":{"keccak256":"0xade221177dda98ebd194c363f264ceea125bde0e6a7a72f7b54da3ac60316894","urls":["bzz-raw://a404dbbc64183995326c345cae27601d37c783b3d9030c8dc0ab4943fa2bf1da","dweb:/ipfs/QmfNFesQffYisafmJFbKHxVFSD8fY49X1z9f8N7qtfW8AX"],"license":"Apache-2.0"},"src/BitOps.sol":{"keccak256":"0x55fc8272df01302eba6fde6174e691ec86f791c39ac9b1c6a5e4ca1792439ca4","urls":["bzz-raw://1e4de6ed5f6e6180261728a590eeb629de65db443f4f279801c03a1bc14201d7","dweb:/ipfs/QmeCcCjy88QJwCkZoGbeZVjxksePwTcmhKevtA2F3kRXaT"],"license":"UNLICENSED"},"src/Cids.sol":{"keccak256":"0x4085c3a55cdf809251a469829bae218d03db4afd9455dab674a8a2ab3b7451dc","urls":["bzz-raw://93406cf5db1b0fa908c306ab6c4d42f69990e9fd08c781de871724c525097803","dweb:/ipfs/QmXkXwjhEo929M6qBXkHKBT3DowiVYcLEe5oUkFnjFJMy2"],"license":"UNLICENSED"},"src/Fees.sol":{"keccak256":"0x74945bddcdd334715c9fab53deba13867f17855976ae64c33abdc84dc439feb0","urls":["bzz-raw://8c7ee756cc406b4192220c54c3e66908a32887c32519461e2dbae8eff144691c","dweb:/ipfs/Qmdy23cwyFG2xbv1htf8FCGwFPfY98rfoKFJ9G6zcWiRkk"],"license":"UNLICENSED"},"src/PDPVerifier.sol":{"keccak256":"0x281e77ce7e1f0eef0d3db9be6116ed71cee92c3b8181dc3c47ff582878ca7fb7","urls":["bzz-raw://63b491e882199f57ce1c5f06ce57b257e40d9c6d38eb62be61d9eba4c7103fe3","dweb:/ipfs/QmQr7NGETKtAcbHJhQ6ZxwB5yoHAwukjVjZwQQGd836KRk"],"license":"UNLICENSED"},"src/Proofs.sol":{"keccak256":"0xf8d27dd91086ba2b4521f36227d92aae35c9f8dfcb117c775e2417166e15a737","urls":["bzz-raw://8db46f122470a14d2c084655c6fada18c966accca05feae92923b1ace7a9f86b","dweb:/ipfs/QmQWGxWUcpejzJt28gwbKfq5C3LLiB5HrHdXMja6HHYxbj"],"license":"MIT"}},"version":1},"id":17} \ No newline at end of file +{"abi":[{"type":"function","name":"addPieces","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"pieceData","type":"tuple[]","internalType":"struct Cids.Cid[]","components":[{"name":"data","type":"bytes","internalType":"bytes"}]},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"nonpayable"},{"type":"function","name":"claimDataSetStorageProvider","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"createDataSet","inputs":[{"name":"listenerAddr","type":"address","internalType":"address"},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"payable"},{"type":"function","name":"dataSetLive","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"deleteDataSet","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"findPieceIds","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"leafIndexs","type":"uint256[]","internalType":"uint256[]"}],"outputs":[{"name":"","type":"tuple[]","internalType":"struct IPDPTypes.PieceIdAndOffset[]","components":[{"name":"pieceId","type":"uint256","internalType":"uint256"},{"name":"offset","type":"uint256","internalType":"uint256"}]}],"stateMutability":"view"},{"type":"function","name":"getChallengeFinality","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getChallengeRange","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getDataSetLastProvenEpoch","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getDataSetLeafCount","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getDataSetListener","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"getDataSetStorageProvider","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"address","internalType":"address"},{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"getNextChallengeEpoch","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getNextDataSetId","inputs":[],"outputs":[{"name":"","type":"uint64","internalType":"uint64"}],"stateMutability":"view"},{"type":"function","name":"getNextPieceId","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getPieceCid","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"pieceId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"getPieceLeafCount","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"pieceId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"getScheduledRemovals","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256[]","internalType":"uint256[]"}],"stateMutability":"view"},{"type":"function","name":"nextProvingPeriod","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"challengeEpoch","type":"uint256","internalType":"uint256"},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"pieceChallengable","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"pieceId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"pieceLive","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"pieceId","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"proposeDataSetStorageProvider","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"newStorageProvider","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"provePossession","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"proofs","type":"tuple[]","internalType":"struct IPDPTypes.Proof[]","components":[{"name":"leaf","type":"bytes32","internalType":"bytes32"},{"name":"proof","type":"bytes32[]","internalType":"bytes32[]"}]}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"schedulePieceDeletions","inputs":[{"name":"setId","type":"uint256","internalType":"uint256"},{"name":"pieceIds","type":"uint256[]","internalType":"uint256[]"},{"name":"extraData","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"event","name":"ContractUpgraded","inputs":[{"name":"version","type":"string","indexed":false,"internalType":"string"},{"name":"newImplementation","type":"address","indexed":false,"internalType":"address"}],"anonymous":false},{"type":"event","name":"DataSetCreated","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"storageProvider","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"DataSetDeleted","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"deletedLeafCount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"DataSetEmpty","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"NextProvingPeriod","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"challengeEpoch","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"leafCount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"PiecesAdded","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"pieceIds","type":"uint256[]","indexed":false,"internalType":"uint256[]"},{"name":"pieceCids","type":"tuple[]","indexed":false,"internalType":"struct Cids.Cid[]","components":[{"name":"data","type":"bytes","internalType":"bytes"}]}],"anonymous":false},{"type":"event","name":"PiecesRemoved","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"pieceIds","type":"uint256[]","indexed":false,"internalType":"uint256[]"}],"anonymous":false},{"type":"event","name":"PossessionProven","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"challenges","type":"tuple[]","indexed":false,"internalType":"struct IPDPTypes.PieceIdAndOffset[]","components":[{"name":"pieceId","type":"uint256","internalType":"uint256"},{"name":"offset","type":"uint256","internalType":"uint256"}]}],"anonymous":false},{"type":"event","name":"ProofFeePaid","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"fee","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"price","type":"uint64","indexed":false,"internalType":"uint64"},{"name":"expo","type":"int32","indexed":false,"internalType":"int32"}],"anonymous":false},{"type":"event","name":"StorageProviderChanged","inputs":[{"name":"setId","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"oldStorageProvider","type":"address","indexed":true,"internalType":"address"},{"name":"newStorageProvider","type":"address","indexed":true,"internalType":"address"}],"anonymous":false}],"bytecode":{"object":"0x","sourceMap":"","linkReferences":{}},"deployedBytecode":{"object":"0x","sourceMap":"","linkReferences":{}},"methodIdentifiers":{"addPieces(uint256,(bytes)[],bytes)":"306fc8be","claimDataSetStorageProvider(uint256,bytes)":"df0f3248","createDataSet(address,bytes)":"bbae41cb","dataSetLive(uint256)":"ca759f27","deleteDataSet(uint256,bytes)":"7a1e2990","findPieceIds(uint256,uint256[])":"349c9179","getChallengeFinality()":"f83758fe","getChallengeRange(uint256)":"89208ba9","getDataSetLastProvenEpoch(uint256)":"04595c1a","getDataSetLeafCount(uint256)":"a531998c","getDataSetListener(uint256)":"2b3129bb","getDataSetStorageProvider(uint256)":"21b7cd1c","getNextChallengeEpoch(uint256)":"6ba4608f","getNextDataSetId()":"442cded3","getNextPieceId(uint256)":"1c5ae80f","getPieceCid(uint256,uint256)":"25bbbedf","getPieceLeafCount(uint256,uint256)":"0cd7b880","getScheduledRemovals(uint256)":"6fa44692","nextProvingPeriod(uint256,uint256,bytes)":"45c0b92d","pieceChallengable(uint256,uint256)":"dc635266","pieceLive(uint256,uint256)":"1a271225","proposeDataSetStorageProvider(uint256,address)":"43186080","provePossession(uint256,(bytes32,bytes32[])[])":"f58f952b","schedulePieceDeletions(uint256,uint256[],bytes)":"0c292024"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.23+commit.f704f362\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"string\",\"name\":\"version\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newImplementation\",\"type\":\"address\"}],\"name\":\"ContractUpgraded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"storageProvider\",\"type\":\"address\"}],\"name\":\"DataSetCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"deletedLeafCount\",\"type\":\"uint256\"}],\"name\":\"DataSetDeleted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"DataSetEmpty\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"challengeEpoch\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"leafCount\",\"type\":\"uint256\"}],\"name\":\"NextProvingPeriod\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"pieceIds\",\"type\":\"uint256[]\"},{\"components\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"indexed\":false,\"internalType\":\"struct Cids.Cid[]\",\"name\":\"pieceCids\",\"type\":\"tuple[]\"}],\"name\":\"PiecesAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"pieceIds\",\"type\":\"uint256[]\"}],\"name\":\"PiecesRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"pieceId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"offset\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"struct IPDPTypes.PieceIdAndOffset[]\",\"name\":\"challenges\",\"type\":\"tuple[]\"}],\"name\":\"PossessionProven\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"price\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"int32\",\"name\":\"expo\",\"type\":\"int32\"}],\"name\":\"ProofFeePaid\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"oldStorageProvider\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newStorageProvider\",\"type\":\"address\"}],\"name\":\"StorageProviderChanged\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"struct Cids.Cid[]\",\"name\":\"pieceData\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"addPieces\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"claimDataSetStorageProvider\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"listenerAddr\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"createDataSet\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"dataSetLive\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"deleteDataSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"leafIndexs\",\"type\":\"uint256[]\"}],\"name\":\"findPieceIds\",\"outputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"pieceId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"offset\",\"type\":\"uint256\"}],\"internalType\":\"struct IPDPTypes.PieceIdAndOffset[]\",\"name\":\"\",\"type\":\"tuple[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getChallengeFinality\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getChallengeRange\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getDataSetLastProvenEpoch\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getDataSetLeafCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getDataSetListener\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getDataSetStorageProvider\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getNextChallengeEpoch\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getNextDataSetId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getNextPieceId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"pieceId\",\"type\":\"uint256\"}],\"name\":\"getPieceCid\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"pieceId\",\"type\":\"uint256\"}],\"name\":\"getPieceLeafCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"}],\"name\":\"getScheduledRemovals\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"challengeEpoch\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"nextProvingPeriod\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"pieceId\",\"type\":\"uint256\"}],\"name\":\"pieceChallengable\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"pieceId\",\"type\":\"uint256\"}],\"name\":\"pieceLive\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"newStorageProvider\",\"type\":\"address\"}],\"name\":\"proposeDataSetStorageProvider\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"leaf\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32[]\",\"name\":\"proof\",\"type\":\"bytes32[]\"}],\"internalType\":\"struct IPDPTypes.Proof[]\",\"name\":\"proofs\",\"type\":\"tuple[]\"}],\"name\":\"provePossession\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"setId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"pieceIds\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"schedulePieceDeletions\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"title\":\"IPDPVerifier\",\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"notice\":\"Main interface for the PDPVerifier contract\",\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/interfaces/IPDPVerifier.sol\":\"IPDPVerifier\"},\"evmVersion\":\"shanghai\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":1000000000},\"remappings\":[\":@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/\",\":@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/\",\":@pythnetwork/pyth-sdk-solidity/=lib/pyth-sdk-solidity/\",\":erc4626-tests/=lib/openzeppelin-contracts-upgradeable/lib/erc4626-tests/\",\":forge-std/=lib/forge-std/src/\",\":halmos-cheatcodes/=lib/openzeppelin-contracts-upgradeable/lib/halmos-cheatcodes/src/\",\":openzeppelin-contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/\",\":openzeppelin-contracts/=lib/openzeppelin-contracts/\",\":pyth-sdk-solidity/=lib/pyth-sdk-solidity/\"],\"viaIR\":true},\"sources\":{\"lib/openzeppelin-contracts/contracts/utils/Panic.sol\":{\"keccak256\":\"0xf7fe324703a64fc51702311dc51562d5cb1497734f074e4f483bfb6717572d7a\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://c6a5ff4f9fd8649b7ee20800b7fa387d3465bd77cf20c2d1068cd5c98e1ed57a\",\"dweb:/ipfs/QmVSaVJf9FXFhdYEYeCEfjMVHrxDh5qL4CGkxdMWpQCrqG\"]},\"lib/openzeppelin-contracts/contracts/utils/Strings.sol\":{\"keccak256\":\"0x9e82c00fb176503860139c6bbf593b1a954ee7ff97ab2969656571382b9b58a2\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://34b4eb157b44d4441315db65561ba7cf0fe909dc598c2cfd7080d203077d5b57\",\"dweb:/ipfs/QmcNdvK3kDUAUr48urHxoeHd1TqVDya4YfZTM66i4goEJn\"]},\"lib/openzeppelin-contracts/contracts/utils/math/Math.sol\":{\"keccak256\":\"0x2c33f654cefbbe80a9b436b5792cfe8bda2e87f139f110073c99762558db252f\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://dc7ebd80046a52f28978cf46a24ff3e4c8568264ab6bb138038951c75d576167\",\"dweb:/ipfs/QmQQjXVr4CbDR3DXd8GHEqn3JSJYTnbBHMJp9tvc29yXrc\"]},\"lib/openzeppelin-contracts/contracts/utils/math/SafeCast.sol\":{\"keccak256\":\"0x195533c86d0ef72bcc06456a4f66a9b941f38eb403739b00f21fd7c1abd1ae54\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://b1d578337048cad08c1c03041cca5978eff5428aa130c781b271ad9e5566e1f8\",\"dweb:/ipfs/QmPFKL2r9CBsMwmUqqdcFPfHZB2qcs9g1HDrPxzWSxomvy\"]},\"lib/openzeppelin-contracts/contracts/utils/math/SignedMath.sol\":{\"keccak256\":\"0xb1970fac7b64e6c09611e6691791e848d5e3fe410fa5899e7df2e0afd77a99e3\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://db5fbb3dddd8b7047465b62575d96231ba8a2774d37fb4737fbf23340fabbb03\",\"dweb:/ipfs/QmVUSvooZKEdEdap619tcJjTLcAuH6QBdZqAzWwnAXZAWJ\"]},\"src/BitOps.sol\":{\"keccak256\":\"0xe6447477342b60f948cb1785c7a723af7da96360887be0e23525604f960f69dc\",\"license\":\"Apache-2.0 OR MIT\",\"urls\":[\"bzz-raw://fe79487c0972995f4cf66c3c0c772777270ffd2faf3fdfcc2e5a974f953f6b04\",\"dweb:/ipfs/QmPNrVcbPYr7kLQjczra7CicTvHijPjrvYBFCBEucKYppM\"]},\"src/Cids.sol\":{\"keccak256\":\"0x4a58ce512bbf6ba57a445b6887c8fb244ba1762d0ccc58e88eb25c13448074f9\",\"license\":\"Apache-2.0 OR MIT\",\"urls\":[\"bzz-raw://76a20f52425df5023d58eda12d8ddb7fc28839c75213e3275fb7eca4985bbd3d\",\"dweb:/ipfs/QmcY1hevEnzUAN4nbNM2t2j8Hv284Ue9bB6fjQjktE1y6j\"]},\"src/interfaces/IPDPEvents.sol\":{\"keccak256\":\"0xbf68ec912762eea46e7121f579e1b9c8c04f2769a8535c012764db823450d356\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://02523a2fee16374e5c180e309cb91229cb822c8ac0beeeaae9501803d80e361a\",\"dweb:/ipfs/QmVgdVzjHJr77T7hRTqDcASpRHbfzH188743zNZmRSM5aQ\"]},\"src/interfaces/IPDPTypes.sol\":{\"keccak256\":\"0x1c5c9eb660a639c30b8b3cc09cc4d4c646935467440281b8c668365e237b6846\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://91c470447e2922976856129e75670c9a4c64dd34c62442fa85a99a67a2add77f\",\"dweb:/ipfs/QmNUXEUnhRRkfKzENo5dR1a1YWsytL7jMkGvxd913mat8t\"]},\"src/interfaces/IPDPVerifier.sol\":{\"keccak256\":\"0x9a315b3ac2d8700a75033f948a9f8234fd825ee970eda093dfac1ef920dc2437\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://8de054dd020f346e35cdaff03e7944184e5272a78f80c801b3f879dba2f27dc3\",\"dweb:/ipfs/QmdiVw2f9NmgdDukYfpWmnDPPJvHPikGdMWFzXJiFxRMcc\"]}},\"version\":1}","metadata":{"compiler":{"version":"0.8.23+commit.f704f362"},"language":"Solidity","output":{"abi":[{"inputs":[{"internalType":"string","name":"version","type":"string","indexed":false},{"internalType":"address","name":"newImplementation","type":"address","indexed":false}],"type":"event","name":"ContractUpgraded","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"address","name":"storageProvider","type":"address","indexed":true}],"type":"event","name":"DataSetCreated","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"uint256","name":"deletedLeafCount","type":"uint256","indexed":false}],"type":"event","name":"DataSetDeleted","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true}],"type":"event","name":"DataSetEmpty","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"uint256","name":"challengeEpoch","type":"uint256","indexed":false},{"internalType":"uint256","name":"leafCount","type":"uint256","indexed":false}],"type":"event","name":"NextProvingPeriod","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"uint256[]","name":"pieceIds","type":"uint256[]","indexed":false},{"internalType":"struct Cids.Cid[]","name":"pieceCids","type":"tuple[]","components":[{"internalType":"bytes","name":"data","type":"bytes"}],"indexed":false}],"type":"event","name":"PiecesAdded","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"uint256[]","name":"pieceIds","type":"uint256[]","indexed":false}],"type":"event","name":"PiecesRemoved","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"struct IPDPTypes.PieceIdAndOffset[]","name":"challenges","type":"tuple[]","components":[{"internalType":"uint256","name":"pieceId","type":"uint256"},{"internalType":"uint256","name":"offset","type":"uint256"}],"indexed":false}],"type":"event","name":"PossessionProven","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"uint256","name":"fee","type":"uint256","indexed":false},{"internalType":"uint64","name":"price","type":"uint64","indexed":false},{"internalType":"int32","name":"expo","type":"int32","indexed":false}],"type":"event","name":"ProofFeePaid","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256","indexed":true},{"internalType":"address","name":"oldStorageProvider","type":"address","indexed":true},{"internalType":"address","name":"newStorageProvider","type":"address","indexed":true}],"type":"event","name":"StorageProviderChanged","anonymous":false},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"struct Cids.Cid[]","name":"pieceData","type":"tuple[]","components":[{"internalType":"bytes","name":"data","type":"bytes"}]},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"addPieces","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"claimDataSetStorageProvider"},{"inputs":[{"internalType":"address","name":"listenerAddr","type":"address"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"payable","type":"function","name":"createDataSet","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"dataSetLive","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"deleteDataSet"},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256[]","name":"leafIndexs","type":"uint256[]"}],"stateMutability":"view","type":"function","name":"findPieceIds","outputs":[{"internalType":"struct IPDPTypes.PieceIdAndOffset[]","name":"","type":"tuple[]","components":[{"internalType":"uint256","name":"pieceId","type":"uint256"},{"internalType":"uint256","name":"offset","type":"uint256"}]}]},{"inputs":[],"stateMutability":"view","type":"function","name":"getChallengeFinality","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getChallengeRange","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getDataSetLastProvenEpoch","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getDataSetLeafCount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getDataSetListener","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getDataSetStorageProvider","outputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getNextChallengeEpoch","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"getNextDataSetId","outputs":[{"internalType":"uint64","name":"","type":"uint64"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getNextPieceId","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"pieceId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getPieceCid","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"pieceId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getPieceLeafCount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"}],"stateMutability":"view","type":"function","name":"getScheduledRemovals","outputs":[{"internalType":"uint256[]","name":"","type":"uint256[]"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"challengeEpoch","type":"uint256"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"nextProvingPeriod"},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"pieceId","type":"uint256"}],"stateMutability":"view","type":"function","name":"pieceChallengable","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256","name":"pieceId","type":"uint256"}],"stateMutability":"view","type":"function","name":"pieceLive","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"address","name":"newStorageProvider","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"proposeDataSetStorageProvider"},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"struct IPDPTypes.Proof[]","name":"proofs","type":"tuple[]","components":[{"internalType":"bytes32","name":"leaf","type":"bytes32"},{"internalType":"bytes32[]","name":"proof","type":"bytes32[]"}]}],"stateMutability":"payable","type":"function","name":"provePossession"},{"inputs":[{"internalType":"uint256","name":"setId","type":"uint256"},{"internalType":"uint256[]","name":"pieceIds","type":"uint256[]"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"schedulePieceDeletions"}],"devdoc":{"kind":"dev","methods":{},"version":1},"userdoc":{"kind":"user","methods":{},"version":1}},"settings":{"remappings":["@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/","@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/","@pythnetwork/pyth-sdk-solidity/=lib/pyth-sdk-solidity/","erc4626-tests/=lib/openzeppelin-contracts-upgradeable/lib/erc4626-tests/","forge-std/=lib/forge-std/src/","halmos-cheatcodes/=lib/openzeppelin-contracts-upgradeable/lib/halmos-cheatcodes/src/","openzeppelin-contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/","openzeppelin-contracts/=lib/openzeppelin-contracts/","pyth-sdk-solidity/=lib/pyth-sdk-solidity/"],"optimizer":{"enabled":true,"runs":1000000000},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/interfaces/IPDPVerifier.sol":"IPDPVerifier"},"evmVersion":"shanghai","libraries":{},"viaIR":true},"sources":{"lib/openzeppelin-contracts/contracts/utils/Panic.sol":{"keccak256":"0xf7fe324703a64fc51702311dc51562d5cb1497734f074e4f483bfb6717572d7a","urls":["bzz-raw://c6a5ff4f9fd8649b7ee20800b7fa387d3465bd77cf20c2d1068cd5c98e1ed57a","dweb:/ipfs/QmVSaVJf9FXFhdYEYeCEfjMVHrxDh5qL4CGkxdMWpQCrqG"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/Strings.sol":{"keccak256":"0x9e82c00fb176503860139c6bbf593b1a954ee7ff97ab2969656571382b9b58a2","urls":["bzz-raw://34b4eb157b44d4441315db65561ba7cf0fe909dc598c2cfd7080d203077d5b57","dweb:/ipfs/QmcNdvK3kDUAUr48urHxoeHd1TqVDya4YfZTM66i4goEJn"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/math/Math.sol":{"keccak256":"0x2c33f654cefbbe80a9b436b5792cfe8bda2e87f139f110073c99762558db252f","urls":["bzz-raw://dc7ebd80046a52f28978cf46a24ff3e4c8568264ab6bb138038951c75d576167","dweb:/ipfs/QmQQjXVr4CbDR3DXd8GHEqn3JSJYTnbBHMJp9tvc29yXrc"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/math/SafeCast.sol":{"keccak256":"0x195533c86d0ef72bcc06456a4f66a9b941f38eb403739b00f21fd7c1abd1ae54","urls":["bzz-raw://b1d578337048cad08c1c03041cca5978eff5428aa130c781b271ad9e5566e1f8","dweb:/ipfs/QmPFKL2r9CBsMwmUqqdcFPfHZB2qcs9g1HDrPxzWSxomvy"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/math/SignedMath.sol":{"keccak256":"0xb1970fac7b64e6c09611e6691791e848d5e3fe410fa5899e7df2e0afd77a99e3","urls":["bzz-raw://db5fbb3dddd8b7047465b62575d96231ba8a2774d37fb4737fbf23340fabbb03","dweb:/ipfs/QmVUSvooZKEdEdap619tcJjTLcAuH6QBdZqAzWwnAXZAWJ"],"license":"MIT"},"src/BitOps.sol":{"keccak256":"0xe6447477342b60f948cb1785c7a723af7da96360887be0e23525604f960f69dc","urls":["bzz-raw://fe79487c0972995f4cf66c3c0c772777270ffd2faf3fdfcc2e5a974f953f6b04","dweb:/ipfs/QmPNrVcbPYr7kLQjczra7CicTvHijPjrvYBFCBEucKYppM"],"license":"Apache-2.0 OR MIT"},"src/Cids.sol":{"keccak256":"0x4a58ce512bbf6ba57a445b6887c8fb244ba1762d0ccc58e88eb25c13448074f9","urls":["bzz-raw://76a20f52425df5023d58eda12d8ddb7fc28839c75213e3275fb7eca4985bbd3d","dweb:/ipfs/QmcY1hevEnzUAN4nbNM2t2j8Hv284Ue9bB6fjQjktE1y6j"],"license":"Apache-2.0 OR MIT"},"src/interfaces/IPDPEvents.sol":{"keccak256":"0xbf68ec912762eea46e7121f579e1b9c8c04f2769a8535c012764db823450d356","urls":["bzz-raw://02523a2fee16374e5c180e309cb91229cb822c8ac0beeeaae9501803d80e361a","dweb:/ipfs/QmVgdVzjHJr77T7hRTqDcASpRHbfzH188743zNZmRSM5aQ"],"license":"MIT"},"src/interfaces/IPDPTypes.sol":{"keccak256":"0x1c5c9eb660a639c30b8b3cc09cc4d4c646935467440281b8c668365e237b6846","urls":["bzz-raw://91c470447e2922976856129e75670c9a4c64dd34c62442fa85a99a67a2add77f","dweb:/ipfs/QmNUXEUnhRRkfKzENo5dR1a1YWsytL7jMkGvxd913mat8t"],"license":"MIT"},"src/interfaces/IPDPVerifier.sol":{"keccak256":"0x9a315b3ac2d8700a75033f948a9f8234fd825ee970eda093dfac1ef920dc2437","urls":["bzz-raw://8de054dd020f346e35cdaff03e7944184e5272a78f80c801b3f879dba2f27dc3","dweb:/ipfs/QmdiVw2f9NmgdDukYfpWmnDPPJvHPikGdMWFzXJiFxRMcc"],"license":"MIT"}},"version":1},"id":54} \ No newline at end of file diff --git a/pdp/contract/README.md b/pdp/contract/README.md new file mode 100644 index 000000000..489519b2d --- /dev/null +++ b/pdp/contract/README.md @@ -0,0 +1,110 @@ +# Guide for Generating Go Bindings Using `abigen` + +This guide explains how to use the `abigen` tool to generate Go bindings for Ethereum smart contracts. These bindings allow you to interact with contracts in Go programs. The smart contract ABIs (Application Binary Interfaces) are retrieved from the source repository and updated after being processed with `make build`. + +--- + +## Prerequisites + +1. **Install `abigen`:** + Install `abigen` from the Go Ethereum (geth) toolset. You can install it via the following command: + + ```bash + go install github.com/ethereum/go-ethereum/cmd/abigen@latest + ``` + +2. **Ensure Forge (`foundry`) is Installed:** + The `make build` step requires the Forge tool (from Foundry). Install it via: + + ```bash + curl -L https://foundry.paradigm.xyz | bash + foundryup + ``` + +3. **Clone the Repository:** + Clone the repository where the smart contract code resides: + + ```bash + git clone https://github.com/FilOzone/pdp.git + cd pdp + ``` + +--- + +## Steps to Generate Go Bindings + +### Step 1: Build the Contracts using `make build` + +In the root of the cloned repository, run: + +```bash +make build +``` + +This command will create the `out/` directory containing the compiled contract artifacts, such as `IPDPProvingSchedule.json` and `PDPVerifier.json`. + +--- + +### Step 2: Extract ABIs from Compiled Artifacts + +Navigate to the `out/` directory and extract the ABI from the compiled JSON files for the required contracts. Use the `jq` tool: + +#### For `IPDPProvingSchedule` ABI: + +Run: + +```bash +jq '.abi' out/IPDPProvingSchedule.sol/IPDPProvingSchedule.json > pdp/contract/IPDPProvingSchedule.abi +``` + +#### For `PDPVerifier` ABI: + +Run: + +```bash +jq '.abi' out/PDPVerifier.sol/PDPVerifier.json > pdp/contract/PDPVerifier.abi +``` + +Ensure that the respective `.abi` files are updated in the `pdp/contract/` directory. + +--- + +### Step 3: Generate Go Bindings Using `abigen` + +Use the `abigen` command-line tool to generate the Go bindings for the parsed ABIs. + +#### For `IPDPProvingSchedule` Contract: + +Run: + +```bash +abigen --abi pdp/contract/IPDPProvingSchedule.abi --pkg contract --type IPDPProvingSchedule --out pdp/contract/pdp_proving_schedule.go +``` + +- `--abi`: Path to the `.abi` file for the contract. +- `--pkg`: Package name in the generated Go code (use the relevant package name, e.g., `contract` in this case). +- `--type`: The Go struct type for this contract (use descriptive names like `IPDPProvingSchedule`). +- `--out`: Output file path for the generated Go file (e.g., `pdp_proving_schedule.go`). + +--- + +#### For `PDPVerifier` Contract: + +Run: + +```bash +abigen --abi pdp/contract/PDPVerifier.abi --pkg contract --type PDPVerifier --out pdp/contract/pdp_verifier.go +``` + +--- + +### Step 4: Verify the Outputs + +After running the `abigen` commands, the Go files (`pdp_proving_schedule.go` and `pdp_verifier.go`) will be generated in the `pdp/contract/` directory. These files contain the Go bindings that can be used in Go applications to interact with the corresponding smart contracts. + +--- + +### Notes + +- **ABI Files:** Ensure that the `.abi` files are correct and up to date by extracting them directly from the compiled JSON artifacts. +- **Code Organization:** Keep both the generated Go files and ABI files in a structured directory layout for easier maintenance (e.g., under `pdp/contract/`). \ No newline at end of file diff --git a/pdp/contract/addresses.go b/pdp/contract/addresses.go index 10292f5be..e39eec488 100644 --- a/pdp/contract/addresses.go +++ b/pdp/contract/addresses.go @@ -11,20 +11,31 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) +const PDPMainnet = "0x9C65E8E57C98cCc040A3d825556832EA1e9f4Df6" +const PDPCalibnet = "0x4A6867D8537f83c1cEae02dF9Df2E31a6c5A1bb6" +const PDPTestNet = "0x36BB02036a59147b5062BaF997743923Faef1D9e" + type PDPContracts struct { PDPVerifier common.Address } func ContractAddresses() PDPContracts { + return PDPContracts{ + PDPVerifier: ConfigurePDPAddress(), + } +} + +func ConfigurePDPAddress() common.Address { switch build.BuildType { case build.BuildCalibnet: - return PDPContracts{ - PDPVerifier: common.HexToAddress("0x5A23b7df87f59A291C26A2A1d684AD03Ce9B68DC"), - } + return common.HexToAddress(PDPCalibnet) case build.BuildMainnet: - return PDPContracts{ - PDPVerifier: common.HexToAddress("0x9C65E8E57C98cCc040A3d825556832EA1e9f4Df6"), + return common.HexToAddress(PDPMainnet) + case build.Build2k, build.BuildDebug: + if !common.IsHexAddress(PDPTestNet) { + panic("PDPTestNet not set") } + return common.HexToAddress(PDPTestNet) default: panic("pdp contracts unknown for this network") } diff --git a/pdp/contract/pdp_proving_schedule.go b/pdp/contract/pdp_proving_schedule.go index 650cc5bdb..be0456667 100644 --- a/pdp/contract/pdp_proving_schedule.go +++ b/pdp/contract/pdp_proving_schedule.go @@ -31,7 +31,7 @@ var ( // IPDPProvingScheduleMetaData contains all meta data concerning the IPDPProvingSchedule contract. var IPDPProvingScheduleMetaData = &bind.MetaData{ - ABI: "[{\"type\":\"function\",\"name\":\"challengeWindow\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"pure\"},{\"type\":\"function\",\"name\":\"getChallengesPerProof\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"stateMutability\":\"pure\"},{\"type\":\"function\",\"name\":\"getMaxProvingPeriod\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"stateMutability\":\"pure\"},{\"type\":\"function\",\"name\":\"initChallengeWindowStart\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"pure\"},{\"type\":\"function\",\"name\":\"nextChallengeWindowStart\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"}]", + ABI: "[{\"type\":\"function\",\"name\":\"getPDPConfig\",\"inputs\":[],\"outputs\":[{\"name\":\"maxProvingPeriod\",\"type\":\"uint64\",\"internalType\":\"uint64\"},{\"name\":\"challengeWindow\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"challengesPerProof\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"initChallengeWindowStart\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"nextPDPChallengeWindowStart\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"}]", } // IPDPProvingScheduleABI is the input ABI used to generate the binding from. @@ -180,136 +180,67 @@ func (_IPDPProvingSchedule *IPDPProvingScheduleTransactorRaw) Transact(opts *bin return _IPDPProvingSchedule.Contract.contract.Transact(opts, method, params...) } -// ChallengeWindow is a free data retrieval call binding the contract method 0x861a1412. +// GetPDPConfig is a free data retrieval call binding the contract method 0xea0f9354. // -// Solidity: function challengeWindow() pure returns(uint256) -func (_IPDPProvingSchedule *IPDPProvingScheduleCaller) ChallengeWindow(opts *bind.CallOpts) (*big.Int, error) { +// Solidity: function getPDPConfig() view returns(uint64 maxProvingPeriod, uint256 challengeWindow, uint256 challengesPerProof, uint256 initChallengeWindowStart) +func (_IPDPProvingSchedule *IPDPProvingScheduleCaller) GetPDPConfig(opts *bind.CallOpts) (struct { + MaxProvingPeriod uint64 + ChallengeWindow *big.Int + ChallengesPerProof *big.Int + InitChallengeWindowStart *big.Int +}, error) { var out []interface{} - err := _IPDPProvingSchedule.contract.Call(opts, &out, "challengeWindow") - + err := _IPDPProvingSchedule.contract.Call(opts, &out, "getPDPConfig") + + outstruct := new(struct { + MaxProvingPeriod uint64 + ChallengeWindow *big.Int + ChallengesPerProof *big.Int + InitChallengeWindowStart *big.Int + }) if err != nil { - return *new(*big.Int), err + return *outstruct, err } - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} + outstruct.MaxProvingPeriod = *abi.ConvertType(out[0], new(uint64)).(*uint64) + outstruct.ChallengeWindow = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.ChallengesPerProof = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.InitChallengeWindowStart = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) -// ChallengeWindow is a free data retrieval call binding the contract method 0x861a1412. -// -// Solidity: function challengeWindow() pure returns(uint256) -func (_IPDPProvingSchedule *IPDPProvingScheduleSession) ChallengeWindow() (*big.Int, error) { - return _IPDPProvingSchedule.Contract.ChallengeWindow(&_IPDPProvingSchedule.CallOpts) -} - -// ChallengeWindow is a free data retrieval call binding the contract method 0x861a1412. -// -// Solidity: function challengeWindow() pure returns(uint256) -func (_IPDPProvingSchedule *IPDPProvingScheduleCallerSession) ChallengeWindow() (*big.Int, error) { - return _IPDPProvingSchedule.Contract.ChallengeWindow(&_IPDPProvingSchedule.CallOpts) -} - -// GetChallengesPerProof is a free data retrieval call binding the contract method 0x47d3dfe7. -// -// Solidity: function getChallengesPerProof() pure returns(uint64) -func (_IPDPProvingSchedule *IPDPProvingScheduleCaller) GetChallengesPerProof(opts *bind.CallOpts) (uint64, error) { - var out []interface{} - err := _IPDPProvingSchedule.contract.Call(opts, &out, "getChallengesPerProof") - - if err != nil { - return *new(uint64), err - } - - out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) - - return out0, err - -} - -// GetChallengesPerProof is a free data retrieval call binding the contract method 0x47d3dfe7. -// -// Solidity: function getChallengesPerProof() pure returns(uint64) -func (_IPDPProvingSchedule *IPDPProvingScheduleSession) GetChallengesPerProof() (uint64, error) { - return _IPDPProvingSchedule.Contract.GetChallengesPerProof(&_IPDPProvingSchedule.CallOpts) -} - -// GetChallengesPerProof is a free data retrieval call binding the contract method 0x47d3dfe7. -// -// Solidity: function getChallengesPerProof() pure returns(uint64) -func (_IPDPProvingSchedule *IPDPProvingScheduleCallerSession) GetChallengesPerProof() (uint64, error) { - return _IPDPProvingSchedule.Contract.GetChallengesPerProof(&_IPDPProvingSchedule.CallOpts) -} - -// GetMaxProvingPeriod is a free data retrieval call binding the contract method 0xf2f12333. -// -// Solidity: function getMaxProvingPeriod() pure returns(uint64) -func (_IPDPProvingSchedule *IPDPProvingScheduleCaller) GetMaxProvingPeriod(opts *bind.CallOpts) (uint64, error) { - var out []interface{} - err := _IPDPProvingSchedule.contract.Call(opts, &out, "getMaxProvingPeriod") - - if err != nil { - return *new(uint64), err - } - - out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) - - return out0, err - -} - -// GetMaxProvingPeriod is a free data retrieval call binding the contract method 0xf2f12333. -// -// Solidity: function getMaxProvingPeriod() pure returns(uint64) -func (_IPDPProvingSchedule *IPDPProvingScheduleSession) GetMaxProvingPeriod() (uint64, error) { - return _IPDPProvingSchedule.Contract.GetMaxProvingPeriod(&_IPDPProvingSchedule.CallOpts) -} - -// GetMaxProvingPeriod is a free data retrieval call binding the contract method 0xf2f12333. -// -// Solidity: function getMaxProvingPeriod() pure returns(uint64) -func (_IPDPProvingSchedule *IPDPProvingScheduleCallerSession) GetMaxProvingPeriod() (uint64, error) { - return _IPDPProvingSchedule.Contract.GetMaxProvingPeriod(&_IPDPProvingSchedule.CallOpts) -} - -// InitChallengeWindowStart is a free data retrieval call binding the contract method 0x21918cea. -// -// Solidity: function initChallengeWindowStart() pure returns(uint256) -func (_IPDPProvingSchedule *IPDPProvingScheduleCaller) InitChallengeWindowStart(opts *bind.CallOpts) (*big.Int, error) { - var out []interface{} - err := _IPDPProvingSchedule.contract.Call(opts, &out, "initChallengeWindowStart") - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err + return *outstruct, err } -// InitChallengeWindowStart is a free data retrieval call binding the contract method 0x21918cea. +// GetPDPConfig is a free data retrieval call binding the contract method 0xea0f9354. // -// Solidity: function initChallengeWindowStart() pure returns(uint256) -func (_IPDPProvingSchedule *IPDPProvingScheduleSession) InitChallengeWindowStart() (*big.Int, error) { - return _IPDPProvingSchedule.Contract.InitChallengeWindowStart(&_IPDPProvingSchedule.CallOpts) +// Solidity: function getPDPConfig() view returns(uint64 maxProvingPeriod, uint256 challengeWindow, uint256 challengesPerProof, uint256 initChallengeWindowStart) +func (_IPDPProvingSchedule *IPDPProvingScheduleSession) GetPDPConfig() (struct { + MaxProvingPeriod uint64 + ChallengeWindow *big.Int + ChallengesPerProof *big.Int + InitChallengeWindowStart *big.Int +}, error) { + return _IPDPProvingSchedule.Contract.GetPDPConfig(&_IPDPProvingSchedule.CallOpts) } -// InitChallengeWindowStart is a free data retrieval call binding the contract method 0x21918cea. +// GetPDPConfig is a free data retrieval call binding the contract method 0xea0f9354. // -// Solidity: function initChallengeWindowStart() pure returns(uint256) -func (_IPDPProvingSchedule *IPDPProvingScheduleCallerSession) InitChallengeWindowStart() (*big.Int, error) { - return _IPDPProvingSchedule.Contract.InitChallengeWindowStart(&_IPDPProvingSchedule.CallOpts) +// Solidity: function getPDPConfig() view returns(uint64 maxProvingPeriod, uint256 challengeWindow, uint256 challengesPerProof, uint256 initChallengeWindowStart) +func (_IPDPProvingSchedule *IPDPProvingScheduleCallerSession) GetPDPConfig() (struct { + MaxProvingPeriod uint64 + ChallengeWindow *big.Int + ChallengesPerProof *big.Int + InitChallengeWindowStart *big.Int +}, error) { + return _IPDPProvingSchedule.Contract.GetPDPConfig(&_IPDPProvingSchedule.CallOpts) } -// NextChallengeWindowStart is a free data retrieval call binding the contract method 0x8bf96d28. +// NextPDPChallengeWindowStart is a free data retrieval call binding the contract method 0x11d41294. // -// Solidity: function nextChallengeWindowStart(uint256 setId) view returns(uint256) -func (_IPDPProvingSchedule *IPDPProvingScheduleCaller) NextChallengeWindowStart(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { +// Solidity: function nextPDPChallengeWindowStart(uint256 setId) view returns(uint256) +func (_IPDPProvingSchedule *IPDPProvingScheduleCaller) NextPDPChallengeWindowStart(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { var out []interface{} - err := _IPDPProvingSchedule.contract.Call(opts, &out, "nextChallengeWindowStart", setId) + err := _IPDPProvingSchedule.contract.Call(opts, &out, "nextPDPChallengeWindowStart", setId) if err != nil { return *new(*big.Int), err @@ -321,16 +252,16 @@ func (_IPDPProvingSchedule *IPDPProvingScheduleCaller) NextChallengeWindowStart( } -// NextChallengeWindowStart is a free data retrieval call binding the contract method 0x8bf96d28. +// NextPDPChallengeWindowStart is a free data retrieval call binding the contract method 0x11d41294. // -// Solidity: function nextChallengeWindowStart(uint256 setId) view returns(uint256) -func (_IPDPProvingSchedule *IPDPProvingScheduleSession) NextChallengeWindowStart(setId *big.Int) (*big.Int, error) { - return _IPDPProvingSchedule.Contract.NextChallengeWindowStart(&_IPDPProvingSchedule.CallOpts, setId) +// Solidity: function nextPDPChallengeWindowStart(uint256 setId) view returns(uint256) +func (_IPDPProvingSchedule *IPDPProvingScheduleSession) NextPDPChallengeWindowStart(setId *big.Int) (*big.Int, error) { + return _IPDPProvingSchedule.Contract.NextPDPChallengeWindowStart(&_IPDPProvingSchedule.CallOpts, setId) } -// NextChallengeWindowStart is a free data retrieval call binding the contract method 0x8bf96d28. +// NextPDPChallengeWindowStart is a free data retrieval call binding the contract method 0x11d41294. // -// Solidity: function nextChallengeWindowStart(uint256 setId) view returns(uint256) -func (_IPDPProvingSchedule *IPDPProvingScheduleCallerSession) NextChallengeWindowStart(setId *big.Int) (*big.Int, error) { - return _IPDPProvingSchedule.Contract.NextChallengeWindowStart(&_IPDPProvingSchedule.CallOpts, setId) +// Solidity: function nextPDPChallengeWindowStart(uint256 setId) view returns(uint256) +func (_IPDPProvingSchedule *IPDPProvingScheduleCallerSession) NextPDPChallengeWindowStart(setId *big.Int) (*big.Int, error) { + return _IPDPProvingSchedule.Contract.NextPDPChallengeWindowStart(&_IPDPProvingSchedule.CallOpts, setId) } diff --git a/pdp/contract/pdp_verifier.go b/pdp/contract/pdp_verifier.go index 64855170e..e1957bdf0 100644 --- a/pdp/contract/pdp_verifier.go +++ b/pdp/contract/pdp_verifier.go @@ -34,27 +34,21 @@ type CidsCid struct { Data []byte } -// PDPVerifierProof is an auto generated low-level Go binding around an user-defined struct. -type PDPVerifierProof struct { - Leaf [32]byte - Proof [][32]byte -} - -// PDPVerifierRootData is an auto generated low-level Go binding around an user-defined struct. -type PDPVerifierRootData struct { - Root CidsCid - RawSize *big.Int +// IPDPTypesPieceIdAndOffset is an auto generated low-level Go binding around an user-defined struct. +type IPDPTypesPieceIdAndOffset struct { + PieceId *big.Int + Offset *big.Int } -// PDPVerifierRootIdAndOffset is an auto generated low-level Go binding around an user-defined struct. -type PDPVerifierRootIdAndOffset struct { - RootId *big.Int - Offset *big.Int +// IPDPTypesProof is an auto generated low-level Go binding around an user-defined struct. +type IPDPTypesProof struct { + Leaf [32]byte + Proof [][32]byte } // PDPVerifierMetaData contains all meta data concerning the PDPVerifier contract. var PDPVerifierMetaData = &bind.MetaData{ - ABI: "[{\"type\":\"constructor\",\"inputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"BURN_ACTOR\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"EXTRA_DATA_MAX_SIZE\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"FIL_USD_PRICE_FEED_ID\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"LEAF_SIZE\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"MAX_ENQUEUED_REMOVALS\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"MAX_ROOT_SIZE\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"NO_CHALLENGE_SCHEDULED\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"NO_PROVEN_EPOCH\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"PYTH\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractIPyth\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"RANDOMNESS_PRECOMPILE\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"SECONDS_IN_DAY\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"UPGRADE_INTERFACE_VERSION\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"addRoots\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"rootData\",\"type\":\"tuple[]\",\"internalType\":\"structPDPVerifier.RootData[]\",\"components\":[{\"name\":\"root\",\"type\":\"tuple\",\"internalType\":\"structCids.Cid\",\"components\":[{\"name\":\"data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}]},{\"name\":\"rawSize\",\"type\":\"uint256\",\"internalType\":\"uint256\"}]},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"calculateProofFee\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"estimatedGasFee\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"claimProofSetOwnership\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"createProofSet\",\"inputs\":[{\"name\":\"listenerAddr\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"deleteProofSet\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"findRootIds\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"leafIndexs\",\"type\":\"uint256[]\",\"internalType\":\"uint256[]\"}],\"outputs\":[{\"name\":\"\",\"type\":\"tuple[]\",\"internalType\":\"structPDPVerifier.RootIdAndOffset[]\",\"components\":[{\"name\":\"rootId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"offset\",\"type\":\"uint256\",\"internalType\":\"uint256\"}]}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getChallengeFinality\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getChallengeRange\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getFILUSDPrice\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"},{\"name\":\"\",\"type\":\"int32\",\"internalType\":\"int32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getNextChallengeEpoch\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getNextProofSetId\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getNextRootId\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getProofSetLastProvenEpoch\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getProofSetLeafCount\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getProofSetListener\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getProofSetOwner\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getRandomness\",\"inputs\":[{\"name\":\"epoch\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getRootCid\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"rootId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"tuple\",\"internalType\":\"structCids.Cid\",\"components\":[{\"name\":\"data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}]}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getRootLeafCount\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"rootId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getScheduledRemovals\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256[]\",\"internalType\":\"uint256[]\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"initialize\",\"inputs\":[{\"name\":\"_challengeFinality\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"nextProvingPeriod\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"challengeEpoch\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"owner\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"proofSetLive\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"proposeProofSetOwner\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"newOwner\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"provePossession\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"proofs\",\"type\":\"tuple[]\",\"internalType\":\"structPDPVerifier.Proof[]\",\"components\":[{\"name\":\"leaf\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"},{\"name\":\"proof\",\"type\":\"bytes32[]\",\"internalType\":\"bytes32[]\"}]}],\"outputs\":[],\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"proxiableUUID\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"renounceOwnership\",\"inputs\":[],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"rootChallengable\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"rootId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"rootLive\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"rootId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"scheduleRemovals\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"rootIds\",\"type\":\"uint256[]\",\"internalType\":\"uint256[]\"},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"transferOwnership\",\"inputs\":[{\"name\":\"newOwner\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"upgradeToAndCall\",\"inputs\":[{\"name\":\"newImplementation\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"payable\"},{\"type\":\"event\",\"name\":\"Debug\",\"inputs\":[{\"name\":\"message\",\"type\":\"string\",\"indexed\":false,\"internalType\":\"string\"},{\"name\":\"value\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Initialized\",\"inputs\":[{\"name\":\"version\",\"type\":\"uint64\",\"indexed\":false,\"internalType\":\"uint64\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"NextProvingPeriod\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"challengeEpoch\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"leafCount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"OwnershipTransferred\",\"inputs\":[{\"name\":\"previousOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"newOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"PossessionProven\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"challenges\",\"type\":\"tuple[]\",\"indexed\":false,\"internalType\":\"structPDPVerifier.RootIdAndOffset[]\",\"components\":[{\"name\":\"rootId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"offset\",\"type\":\"uint256\",\"internalType\":\"uint256\"}]}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"ProofFeePaid\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"fee\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"price\",\"type\":\"uint64\",\"indexed\":false,\"internalType\":\"uint64\"},{\"name\":\"expo\",\"type\":\"int32\",\"indexed\":false,\"internalType\":\"int32\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"ProofSetCreated\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"owner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"ProofSetDeleted\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"deletedLeafCount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"ProofSetEmpty\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"ProofSetOwnerChanged\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"oldOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"newOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"RootsAdded\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"rootIds\",\"type\":\"uint256[]\",\"indexed\":false,\"internalType\":\"uint256[]\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"RootsRemoved\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"rootIds\",\"type\":\"uint256[]\",\"indexed\":false,\"internalType\":\"uint256[]\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Upgraded\",\"inputs\":[{\"name\":\"implementation\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"error\",\"name\":\"AddressEmptyCode\",\"inputs\":[{\"name\":\"target\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"type\":\"error\",\"name\":\"ERC1967InvalidImplementation\",\"inputs\":[{\"name\":\"implementation\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"type\":\"error\",\"name\":\"ERC1967NonPayable\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"FailedCall\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"IndexedError\",\"inputs\":[{\"name\":\"idx\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"msg\",\"type\":\"string\",\"internalType\":\"string\"}]},{\"type\":\"error\",\"name\":\"InvalidInitialization\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"NotInitializing\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"OwnableInvalidOwner\",\"inputs\":[{\"name\":\"owner\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"type\":\"error\",\"name\":\"OwnableUnauthorizedAccount\",\"inputs\":[{\"name\":\"account\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"type\":\"error\",\"name\":\"UUPSUnauthorizedCallContext\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"UUPSUnsupportedProxiableUUID\",\"inputs\":[{\"name\":\"slot\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}]}]", + ABI: "[{\"type\":\"constructor\",\"inputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"BURN_ACTOR\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"EXTRA_DATA_MAX_SIZE\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"FIL_USD_PRICE_FEED_ID\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"LEAF_SIZE\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"MAX_ENQUEUED_REMOVALS\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"MAX_PIECE_SIZE_LOG2\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"NO_CHALLENGE_SCHEDULED\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"NO_PROVEN_EPOCH\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"PYTH\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractIPyth\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"RANDOMNESS_PRECOMPILE\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"SECONDS_IN_DAY\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"UPGRADE_INTERFACE_VERSION\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"VERSION\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"addPieces\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"pieceData\",\"type\":\"tuple[]\",\"internalType\":\"structCids.Cid[]\",\"components\":[{\"name\":\"data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}]},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"calculateProofFee\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"estimatedGasFee\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"claimDataSetStorageProvider\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"createDataSet\",\"inputs\":[{\"name\":\"listenerAddr\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"dataSetLive\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"deleteDataSet\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"findPieceIds\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"leafIndexs\",\"type\":\"uint256[]\",\"internalType\":\"uint256[]\"}],\"outputs\":[{\"name\":\"\",\"type\":\"tuple[]\",\"internalType\":\"structIPDPTypes.PieceIdAndOffset[]\",\"components\":[{\"name\":\"pieceId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"offset\",\"type\":\"uint256\",\"internalType\":\"uint256\"}]}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getActivePieceCount\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"activeCount\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getActivePieces\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"offset\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"limit\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"pieces\",\"type\":\"tuple[]\",\"internalType\":\"structCids.Cid[]\",\"components\":[{\"name\":\"data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}]},{\"name\":\"pieceIds\",\"type\":\"uint256[]\",\"internalType\":\"uint256[]\"},{\"name\":\"rawSizes\",\"type\":\"uint256[]\",\"internalType\":\"uint256[]\"},{\"name\":\"hasMore\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getChallengeFinality\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getChallengeRange\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getDataSetLastProvenEpoch\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getDataSetLeafCount\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getDataSetListener\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getDataSetStorageProvider\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getFILUSDPrice\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"},{\"name\":\"\",\"type\":\"int32\",\"internalType\":\"int32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getNextChallengeEpoch\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getNextDataSetId\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getNextPieceId\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getPieceCid\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"pieceId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"tuple\",\"internalType\":\"structCids.Cid\",\"components\":[{\"name\":\"data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}]}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getPieceLeafCount\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"pieceId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getRandomness\",\"inputs\":[{\"name\":\"epoch\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getScheduledRemovals\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256[]\",\"internalType\":\"uint256[]\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"initialize\",\"inputs\":[{\"name\":\"_challengeFinality\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"migrate\",\"inputs\":[],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"nextProvingPeriod\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"challengeEpoch\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"owner\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"pieceChallengable\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"pieceId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"pieceLive\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"pieceId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"proposeDataSetStorageProvider\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"newStorageProvider\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"provePossession\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"proofs\",\"type\":\"tuple[]\",\"internalType\":\"structIPDPTypes.Proof[]\",\"components\":[{\"name\":\"leaf\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"},{\"name\":\"proof\",\"type\":\"bytes32[]\",\"internalType\":\"bytes32[]\"}]}],\"outputs\":[],\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"proxiableUUID\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"renounceOwnership\",\"inputs\":[],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"schedulePieceDeletions\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"pieceIds\",\"type\":\"uint256[]\",\"internalType\":\"uint256[]\"},{\"name\":\"extraData\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"transferOwnership\",\"inputs\":[{\"name\":\"newOwner\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"upgradeToAndCall\",\"inputs\":[{\"name\":\"newImplementation\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"payable\"},{\"type\":\"event\",\"name\":\"ContractUpgraded\",\"inputs\":[{\"name\":\"version\",\"type\":\"string\",\"indexed\":false,\"internalType\":\"string\"},{\"name\":\"implementation\",\"type\":\"address\",\"indexed\":false,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"DataSetCreated\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"storageProvider\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"DataSetDeleted\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"deletedLeafCount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"DataSetEmpty\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Initialized\",\"inputs\":[{\"name\":\"version\",\"type\":\"uint64\",\"indexed\":false,\"internalType\":\"uint64\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"NextProvingPeriod\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"challengeEpoch\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"leafCount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"OwnershipTransferred\",\"inputs\":[{\"name\":\"previousOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"newOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"PiecesAdded\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"pieceIds\",\"type\":\"uint256[]\",\"indexed\":false,\"internalType\":\"uint256[]\"},{\"name\":\"pieceCids\",\"type\":\"tuple[]\",\"indexed\":false,\"internalType\":\"structCids.Cid[]\",\"components\":[{\"name\":\"data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}]}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"PiecesRemoved\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"pieceIds\",\"type\":\"uint256[]\",\"indexed\":false,\"internalType\":\"uint256[]\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"PossessionProven\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"challenges\",\"type\":\"tuple[]\",\"indexed\":false,\"internalType\":\"structIPDPTypes.PieceIdAndOffset[]\",\"components\":[{\"name\":\"pieceId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"offset\",\"type\":\"uint256\",\"internalType\":\"uint256\"}]}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"ProofFeePaid\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"fee\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"price\",\"type\":\"uint64\",\"indexed\":false,\"internalType\":\"uint64\"},{\"name\":\"expo\",\"type\":\"int32\",\"indexed\":false,\"internalType\":\"int32\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"StorageProviderChanged\",\"inputs\":[{\"name\":\"setId\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"oldStorageProvider\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"newStorageProvider\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Upgraded\",\"inputs\":[{\"name\":\"implementation\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"error\",\"name\":\"AddressEmptyCode\",\"inputs\":[{\"name\":\"target\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"type\":\"error\",\"name\":\"ERC1967InvalidImplementation\",\"inputs\":[{\"name\":\"implementation\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"type\":\"error\",\"name\":\"ERC1967NonPayable\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"FailedCall\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"IndexedError\",\"inputs\":[{\"name\":\"idx\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"msg\",\"type\":\"string\",\"internalType\":\"string\"}]},{\"type\":\"error\",\"name\":\"InvalidInitialization\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"NotInitializing\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"OwnableInvalidOwner\",\"inputs\":[{\"name\":\"owner\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"type\":\"error\",\"name\":\"OwnableUnauthorizedAccount\",\"inputs\":[{\"name\":\"account\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"type\":\"error\",\"name\":\"UUPSUnauthorizedCallContext\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"UUPSUnsupportedProxiableUUID\",\"inputs\":[{\"name\":\"slot\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}]}]", } // PDPVerifierABI is the input ABI used to generate the binding from. @@ -358,12 +352,12 @@ func (_PDPVerifier *PDPVerifierCallerSession) MAXENQUEUEDREMOVALS() (*big.Int, e return _PDPVerifier.Contract.MAXENQUEUEDREMOVALS(&_PDPVerifier.CallOpts) } -// MAXROOTSIZE is a free data retrieval call binding the contract method 0x16e2bcd5. +// MAXPIECESIZELOG2 is a free data retrieval call binding the contract method 0xf8eb8276. // -// Solidity: function MAX_ROOT_SIZE() view returns(uint256) -func (_PDPVerifier *PDPVerifierCaller) MAXROOTSIZE(opts *bind.CallOpts) (*big.Int, error) { +// Solidity: function MAX_PIECE_SIZE_LOG2() view returns(uint256) +func (_PDPVerifier *PDPVerifierCaller) MAXPIECESIZELOG2(opts *bind.CallOpts) (*big.Int, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "MAX_ROOT_SIZE") + err := _PDPVerifier.contract.Call(opts, &out, "MAX_PIECE_SIZE_LOG2") if err != nil { return *new(*big.Int), err @@ -375,18 +369,18 @@ func (_PDPVerifier *PDPVerifierCaller) MAXROOTSIZE(opts *bind.CallOpts) (*big.In } -// MAXROOTSIZE is a free data retrieval call binding the contract method 0x16e2bcd5. +// MAXPIECESIZELOG2 is a free data retrieval call binding the contract method 0xf8eb8276. // -// Solidity: function MAX_ROOT_SIZE() view returns(uint256) -func (_PDPVerifier *PDPVerifierSession) MAXROOTSIZE() (*big.Int, error) { - return _PDPVerifier.Contract.MAXROOTSIZE(&_PDPVerifier.CallOpts) +// Solidity: function MAX_PIECE_SIZE_LOG2() view returns(uint256) +func (_PDPVerifier *PDPVerifierSession) MAXPIECESIZELOG2() (*big.Int, error) { + return _PDPVerifier.Contract.MAXPIECESIZELOG2(&_PDPVerifier.CallOpts) } -// MAXROOTSIZE is a free data retrieval call binding the contract method 0x16e2bcd5. +// MAXPIECESIZELOG2 is a free data retrieval call binding the contract method 0xf8eb8276. // -// Solidity: function MAX_ROOT_SIZE() view returns(uint256) -func (_PDPVerifier *PDPVerifierCallerSession) MAXROOTSIZE() (*big.Int, error) { - return _PDPVerifier.Contract.MAXROOTSIZE(&_PDPVerifier.CallOpts) +// Solidity: function MAX_PIECE_SIZE_LOG2() view returns(uint256) +func (_PDPVerifier *PDPVerifierCallerSession) MAXPIECESIZELOG2() (*big.Int, error) { + return _PDPVerifier.Contract.MAXPIECESIZELOG2(&_PDPVerifier.CallOpts) } // NOCHALLENGESCHEDULED is a free data retrieval call binding the contract method 0x462dd449. @@ -575,6 +569,37 @@ func (_PDPVerifier *PDPVerifierCallerSession) UPGRADEINTERFACEVERSION() (string, return _PDPVerifier.Contract.UPGRADEINTERFACEVERSION(&_PDPVerifier.CallOpts) } +// VERSION is a free data retrieval call binding the contract method 0xffa1ad74. +// +// Solidity: function VERSION() view returns(string) +func (_PDPVerifier *PDPVerifierCaller) VERSION(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _PDPVerifier.contract.Call(opts, &out, "VERSION") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +// VERSION is a free data retrieval call binding the contract method 0xffa1ad74. +// +// Solidity: function VERSION() view returns(string) +func (_PDPVerifier *PDPVerifierSession) VERSION() (string, error) { + return _PDPVerifier.Contract.VERSION(&_PDPVerifier.CallOpts) +} + +// VERSION is a free data retrieval call binding the contract method 0xffa1ad74. +// +// Solidity: function VERSION() view returns(string) +func (_PDPVerifier *PDPVerifierCallerSession) VERSION() (string, error) { + return _PDPVerifier.Contract.VERSION(&_PDPVerifier.CallOpts) +} + // CalculateProofFee is a free data retrieval call binding the contract method 0x4903704a. // // Solidity: function calculateProofFee(uint256 setId, uint256 estimatedGasFee) view returns(uint256) @@ -606,35 +631,152 @@ func (_PDPVerifier *PDPVerifierCallerSession) CalculateProofFee(setId *big.Int, return _PDPVerifier.Contract.CalculateProofFee(&_PDPVerifier.CallOpts, setId, estimatedGasFee) } -// FindRootIds is a free data retrieval call binding the contract method 0x0528a55b. +// DataSetLive is a free data retrieval call binding the contract method 0xca759f27. +// +// Solidity: function dataSetLive(uint256 setId) view returns(bool) +func (_PDPVerifier *PDPVerifierCaller) DataSetLive(opts *bind.CallOpts, setId *big.Int) (bool, error) { + var out []interface{} + err := _PDPVerifier.contract.Call(opts, &out, "dataSetLive", setId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// DataSetLive is a free data retrieval call binding the contract method 0xca759f27. +// +// Solidity: function dataSetLive(uint256 setId) view returns(bool) +func (_PDPVerifier *PDPVerifierSession) DataSetLive(setId *big.Int) (bool, error) { + return _PDPVerifier.Contract.DataSetLive(&_PDPVerifier.CallOpts, setId) +} + +// DataSetLive is a free data retrieval call binding the contract method 0xca759f27. +// +// Solidity: function dataSetLive(uint256 setId) view returns(bool) +func (_PDPVerifier *PDPVerifierCallerSession) DataSetLive(setId *big.Int) (bool, error) { + return _PDPVerifier.Contract.DataSetLive(&_PDPVerifier.CallOpts, setId) +} + +// FindPieceIds is a free data retrieval call binding the contract method 0x349c9179. +// +// Solidity: function findPieceIds(uint256 setId, uint256[] leafIndexs) view returns((uint256,uint256)[]) +func (_PDPVerifier *PDPVerifierCaller) FindPieceIds(opts *bind.CallOpts, setId *big.Int, leafIndexs []*big.Int) ([]IPDPTypesPieceIdAndOffset, error) { + var out []interface{} + err := _PDPVerifier.contract.Call(opts, &out, "findPieceIds", setId, leafIndexs) + + if err != nil { + return *new([]IPDPTypesPieceIdAndOffset), err + } + + out0 := *abi.ConvertType(out[0], new([]IPDPTypesPieceIdAndOffset)).(*[]IPDPTypesPieceIdAndOffset) + + return out0, err + +} + +// FindPieceIds is a free data retrieval call binding the contract method 0x349c9179. +// +// Solidity: function findPieceIds(uint256 setId, uint256[] leafIndexs) view returns((uint256,uint256)[]) +func (_PDPVerifier *PDPVerifierSession) FindPieceIds(setId *big.Int, leafIndexs []*big.Int) ([]IPDPTypesPieceIdAndOffset, error) { + return _PDPVerifier.Contract.FindPieceIds(&_PDPVerifier.CallOpts, setId, leafIndexs) +} + +// FindPieceIds is a free data retrieval call binding the contract method 0x349c9179. +// +// Solidity: function findPieceIds(uint256 setId, uint256[] leafIndexs) view returns((uint256,uint256)[]) +func (_PDPVerifier *PDPVerifierCallerSession) FindPieceIds(setId *big.Int, leafIndexs []*big.Int) ([]IPDPTypesPieceIdAndOffset, error) { + return _PDPVerifier.Contract.FindPieceIds(&_PDPVerifier.CallOpts, setId, leafIndexs) +} + +// GetActivePieceCount is a free data retrieval call binding the contract method 0x5353bdfd. // -// Solidity: function findRootIds(uint256 setId, uint256[] leafIndexs) view returns((uint256,uint256)[]) -func (_PDPVerifier *PDPVerifierCaller) FindRootIds(opts *bind.CallOpts, setId *big.Int, leafIndexs []*big.Int) ([]PDPVerifierRootIdAndOffset, error) { +// Solidity: function getActivePieceCount(uint256 setId) view returns(uint256 activeCount) +func (_PDPVerifier *PDPVerifierCaller) GetActivePieceCount(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "findRootIds", setId, leafIndexs) + err := _PDPVerifier.contract.Call(opts, &out, "getActivePieceCount", setId) if err != nil { - return *new([]PDPVerifierRootIdAndOffset), err + return *new(*big.Int), err } - out0 := *abi.ConvertType(out[0], new([]PDPVerifierRootIdAndOffset)).(*[]PDPVerifierRootIdAndOffset) + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) return out0, err } -// FindRootIds is a free data retrieval call binding the contract method 0x0528a55b. +// GetActivePieceCount is a free data retrieval call binding the contract method 0x5353bdfd. +// +// Solidity: function getActivePieceCount(uint256 setId) view returns(uint256 activeCount) +func (_PDPVerifier *PDPVerifierSession) GetActivePieceCount(setId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetActivePieceCount(&_PDPVerifier.CallOpts, setId) +} + +// GetActivePieceCount is a free data retrieval call binding the contract method 0x5353bdfd. +// +// Solidity: function getActivePieceCount(uint256 setId) view returns(uint256 activeCount) +func (_PDPVerifier *PDPVerifierCallerSession) GetActivePieceCount(setId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetActivePieceCount(&_PDPVerifier.CallOpts, setId) +} + +// GetActivePieces is a free data retrieval call binding the contract method 0x39f51544. +// +// Solidity: function getActivePieces(uint256 setId, uint256 offset, uint256 limit) view returns((bytes)[] pieces, uint256[] pieceIds, uint256[] rawSizes, bool hasMore) +func (_PDPVerifier *PDPVerifierCaller) GetActivePieces(opts *bind.CallOpts, setId *big.Int, offset *big.Int, limit *big.Int) (struct { + Pieces []CidsCid + PieceIds []*big.Int + RawSizes []*big.Int + HasMore bool +}, error) { + var out []interface{} + err := _PDPVerifier.contract.Call(opts, &out, "getActivePieces", setId, offset, limit) + + outstruct := new(struct { + Pieces []CidsCid + PieceIds []*big.Int + RawSizes []*big.Int + HasMore bool + }) + if err != nil { + return *outstruct, err + } + + outstruct.Pieces = *abi.ConvertType(out[0], new([]CidsCid)).(*[]CidsCid) + outstruct.PieceIds = *abi.ConvertType(out[1], new([]*big.Int)).(*[]*big.Int) + outstruct.RawSizes = *abi.ConvertType(out[2], new([]*big.Int)).(*[]*big.Int) + outstruct.HasMore = *abi.ConvertType(out[3], new(bool)).(*bool) + + return *outstruct, err + +} + +// GetActivePieces is a free data retrieval call binding the contract method 0x39f51544. // -// Solidity: function findRootIds(uint256 setId, uint256[] leafIndexs) view returns((uint256,uint256)[]) -func (_PDPVerifier *PDPVerifierSession) FindRootIds(setId *big.Int, leafIndexs []*big.Int) ([]PDPVerifierRootIdAndOffset, error) { - return _PDPVerifier.Contract.FindRootIds(&_PDPVerifier.CallOpts, setId, leafIndexs) +// Solidity: function getActivePieces(uint256 setId, uint256 offset, uint256 limit) view returns((bytes)[] pieces, uint256[] pieceIds, uint256[] rawSizes, bool hasMore) +func (_PDPVerifier *PDPVerifierSession) GetActivePieces(setId *big.Int, offset *big.Int, limit *big.Int) (struct { + Pieces []CidsCid + PieceIds []*big.Int + RawSizes []*big.Int + HasMore bool +}, error) { + return _PDPVerifier.Contract.GetActivePieces(&_PDPVerifier.CallOpts, setId, offset, limit) } -// FindRootIds is a free data retrieval call binding the contract method 0x0528a55b. +// GetActivePieces is a free data retrieval call binding the contract method 0x39f51544. // -// Solidity: function findRootIds(uint256 setId, uint256[] leafIndexs) view returns((uint256,uint256)[]) -func (_PDPVerifier *PDPVerifierCallerSession) FindRootIds(setId *big.Int, leafIndexs []*big.Int) ([]PDPVerifierRootIdAndOffset, error) { - return _PDPVerifier.Contract.FindRootIds(&_PDPVerifier.CallOpts, setId, leafIndexs) +// Solidity: function getActivePieces(uint256 setId, uint256 offset, uint256 limit) view returns((bytes)[] pieces, uint256[] pieceIds, uint256[] rawSizes, bool hasMore) +func (_PDPVerifier *PDPVerifierCallerSession) GetActivePieces(setId *big.Int, offset *big.Int, limit *big.Int) (struct { + Pieces []CidsCid + PieceIds []*big.Int + RawSizes []*big.Int + HasMore bool +}, error) { + return _PDPVerifier.Contract.GetActivePieces(&_PDPVerifier.CallOpts, setId, offset, limit) } // GetChallengeFinality is a free data retrieval call binding the contract method 0xf83758fe. @@ -699,44 +841,43 @@ func (_PDPVerifier *PDPVerifierCallerSession) GetChallengeRange(setId *big.Int) return _PDPVerifier.Contract.GetChallengeRange(&_PDPVerifier.CallOpts, setId) } -// GetFILUSDPrice is a free data retrieval call binding the contract method 0x4fa27920. +// GetDataSetLastProvenEpoch is a free data retrieval call binding the contract method 0x04595c1a. // -// Solidity: function getFILUSDPrice() view returns(uint64, int32) -func (_PDPVerifier *PDPVerifierCaller) GetFILUSDPrice(opts *bind.CallOpts) (uint64, int32, error) { +// Solidity: function getDataSetLastProvenEpoch(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierCaller) GetDataSetLastProvenEpoch(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getFILUSDPrice") + err := _PDPVerifier.contract.Call(opts, &out, "getDataSetLastProvenEpoch", setId) if err != nil { - return *new(uint64), *new(int32), err + return *new(*big.Int), err } - out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) - out1 := *abi.ConvertType(out[1], new(int32)).(*int32) + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - return out0, out1, err + return out0, err } -// GetFILUSDPrice is a free data retrieval call binding the contract method 0x4fa27920. +// GetDataSetLastProvenEpoch is a free data retrieval call binding the contract method 0x04595c1a. // -// Solidity: function getFILUSDPrice() view returns(uint64, int32) -func (_PDPVerifier *PDPVerifierSession) GetFILUSDPrice() (uint64, int32, error) { - return _PDPVerifier.Contract.GetFILUSDPrice(&_PDPVerifier.CallOpts) +// Solidity: function getDataSetLastProvenEpoch(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierSession) GetDataSetLastProvenEpoch(setId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetDataSetLastProvenEpoch(&_PDPVerifier.CallOpts, setId) } -// GetFILUSDPrice is a free data retrieval call binding the contract method 0x4fa27920. +// GetDataSetLastProvenEpoch is a free data retrieval call binding the contract method 0x04595c1a. // -// Solidity: function getFILUSDPrice() view returns(uint64, int32) -func (_PDPVerifier *PDPVerifierCallerSession) GetFILUSDPrice() (uint64, int32, error) { - return _PDPVerifier.Contract.GetFILUSDPrice(&_PDPVerifier.CallOpts) +// Solidity: function getDataSetLastProvenEpoch(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierCallerSession) GetDataSetLastProvenEpoch(setId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetDataSetLastProvenEpoch(&_PDPVerifier.CallOpts, setId) } -// GetNextChallengeEpoch is a free data retrieval call binding the contract method 0x6ba4608f. +// GetDataSetLeafCount is a free data retrieval call binding the contract method 0xa531998c. // -// Solidity: function getNextChallengeEpoch(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierCaller) GetNextChallengeEpoch(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { +// Solidity: function getDataSetLeafCount(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierCaller) GetDataSetLeafCount(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getNextChallengeEpoch", setId) + err := _PDPVerifier.contract.Call(opts, &out, "getDataSetLeafCount", setId) if err != nil { return *new(*big.Int), err @@ -748,119 +889,121 @@ func (_PDPVerifier *PDPVerifierCaller) GetNextChallengeEpoch(opts *bind.CallOpts } -// GetNextChallengeEpoch is a free data retrieval call binding the contract method 0x6ba4608f. +// GetDataSetLeafCount is a free data retrieval call binding the contract method 0xa531998c. // -// Solidity: function getNextChallengeEpoch(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierSession) GetNextChallengeEpoch(setId *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetNextChallengeEpoch(&_PDPVerifier.CallOpts, setId) +// Solidity: function getDataSetLeafCount(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierSession) GetDataSetLeafCount(setId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetDataSetLeafCount(&_PDPVerifier.CallOpts, setId) } -// GetNextChallengeEpoch is a free data retrieval call binding the contract method 0x6ba4608f. +// GetDataSetLeafCount is a free data retrieval call binding the contract method 0xa531998c. // -// Solidity: function getNextChallengeEpoch(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierCallerSession) GetNextChallengeEpoch(setId *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetNextChallengeEpoch(&_PDPVerifier.CallOpts, setId) +// Solidity: function getDataSetLeafCount(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierCallerSession) GetDataSetLeafCount(setId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetDataSetLeafCount(&_PDPVerifier.CallOpts, setId) } -// GetNextProofSetId is a free data retrieval call binding the contract method 0x8ea417e5. +// GetDataSetListener is a free data retrieval call binding the contract method 0x2b3129bb. // -// Solidity: function getNextProofSetId() view returns(uint64) -func (_PDPVerifier *PDPVerifierCaller) GetNextProofSetId(opts *bind.CallOpts) (uint64, error) { +// Solidity: function getDataSetListener(uint256 setId) view returns(address) +func (_PDPVerifier *PDPVerifierCaller) GetDataSetListener(opts *bind.CallOpts, setId *big.Int) (common.Address, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getNextProofSetId") + err := _PDPVerifier.contract.Call(opts, &out, "getDataSetListener", setId) if err != nil { - return *new(uint64), err + return *new(common.Address), err } - out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) return out0, err } -// GetNextProofSetId is a free data retrieval call binding the contract method 0x8ea417e5. +// GetDataSetListener is a free data retrieval call binding the contract method 0x2b3129bb. // -// Solidity: function getNextProofSetId() view returns(uint64) -func (_PDPVerifier *PDPVerifierSession) GetNextProofSetId() (uint64, error) { - return _PDPVerifier.Contract.GetNextProofSetId(&_PDPVerifier.CallOpts) +// Solidity: function getDataSetListener(uint256 setId) view returns(address) +func (_PDPVerifier *PDPVerifierSession) GetDataSetListener(setId *big.Int) (common.Address, error) { + return _PDPVerifier.Contract.GetDataSetListener(&_PDPVerifier.CallOpts, setId) } -// GetNextProofSetId is a free data retrieval call binding the contract method 0x8ea417e5. +// GetDataSetListener is a free data retrieval call binding the contract method 0x2b3129bb. // -// Solidity: function getNextProofSetId() view returns(uint64) -func (_PDPVerifier *PDPVerifierCallerSession) GetNextProofSetId() (uint64, error) { - return _PDPVerifier.Contract.GetNextProofSetId(&_PDPVerifier.CallOpts) +// Solidity: function getDataSetListener(uint256 setId) view returns(address) +func (_PDPVerifier *PDPVerifierCallerSession) GetDataSetListener(setId *big.Int) (common.Address, error) { + return _PDPVerifier.Contract.GetDataSetListener(&_PDPVerifier.CallOpts, setId) } -// GetNextRootId is a free data retrieval call binding the contract method 0xd49245c1. +// GetDataSetStorageProvider is a free data retrieval call binding the contract method 0x21b7cd1c. // -// Solidity: function getNextRootId(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierCaller) GetNextRootId(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { +// Solidity: function getDataSetStorageProvider(uint256 setId) view returns(address, address) +func (_PDPVerifier *PDPVerifierCaller) GetDataSetStorageProvider(opts *bind.CallOpts, setId *big.Int) (common.Address, common.Address, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getNextRootId", setId) + err := _PDPVerifier.contract.Call(opts, &out, "getDataSetStorageProvider", setId) if err != nil { - return *new(*big.Int), err + return *new(common.Address), *new(common.Address), err } - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + out1 := *abi.ConvertType(out[1], new(common.Address)).(*common.Address) - return out0, err + return out0, out1, err } -// GetNextRootId is a free data retrieval call binding the contract method 0xd49245c1. +// GetDataSetStorageProvider is a free data retrieval call binding the contract method 0x21b7cd1c. // -// Solidity: function getNextRootId(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierSession) GetNextRootId(setId *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetNextRootId(&_PDPVerifier.CallOpts, setId) +// Solidity: function getDataSetStorageProvider(uint256 setId) view returns(address, address) +func (_PDPVerifier *PDPVerifierSession) GetDataSetStorageProvider(setId *big.Int) (common.Address, common.Address, error) { + return _PDPVerifier.Contract.GetDataSetStorageProvider(&_PDPVerifier.CallOpts, setId) } -// GetNextRootId is a free data retrieval call binding the contract method 0xd49245c1. +// GetDataSetStorageProvider is a free data retrieval call binding the contract method 0x21b7cd1c. // -// Solidity: function getNextRootId(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierCallerSession) GetNextRootId(setId *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetNextRootId(&_PDPVerifier.CallOpts, setId) +// Solidity: function getDataSetStorageProvider(uint256 setId) view returns(address, address) +func (_PDPVerifier *PDPVerifierCallerSession) GetDataSetStorageProvider(setId *big.Int) (common.Address, common.Address, error) { + return _PDPVerifier.Contract.GetDataSetStorageProvider(&_PDPVerifier.CallOpts, setId) } -// GetProofSetLastProvenEpoch is a free data retrieval call binding the contract method 0xfaa67163. +// GetFILUSDPrice is a free data retrieval call binding the contract method 0x4fa27920. // -// Solidity: function getProofSetLastProvenEpoch(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierCaller) GetProofSetLastProvenEpoch(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { +// Solidity: function getFILUSDPrice() view returns(uint64, int32) +func (_PDPVerifier *PDPVerifierCaller) GetFILUSDPrice(opts *bind.CallOpts) (uint64, int32, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getProofSetLastProvenEpoch", setId) + err := _PDPVerifier.contract.Call(opts, &out, "getFILUSDPrice") if err != nil { - return *new(*big.Int), err + return *new(uint64), *new(int32), err } - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + out1 := *abi.ConvertType(out[1], new(int32)).(*int32) - return out0, err + return out0, out1, err } -// GetProofSetLastProvenEpoch is a free data retrieval call binding the contract method 0xfaa67163. +// GetFILUSDPrice is a free data retrieval call binding the contract method 0x4fa27920. // -// Solidity: function getProofSetLastProvenEpoch(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierSession) GetProofSetLastProvenEpoch(setId *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetProofSetLastProvenEpoch(&_PDPVerifier.CallOpts, setId) +// Solidity: function getFILUSDPrice() view returns(uint64, int32) +func (_PDPVerifier *PDPVerifierSession) GetFILUSDPrice() (uint64, int32, error) { + return _PDPVerifier.Contract.GetFILUSDPrice(&_PDPVerifier.CallOpts) } -// GetProofSetLastProvenEpoch is a free data retrieval call binding the contract method 0xfaa67163. +// GetFILUSDPrice is a free data retrieval call binding the contract method 0x4fa27920. // -// Solidity: function getProofSetLastProvenEpoch(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierCallerSession) GetProofSetLastProvenEpoch(setId *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetProofSetLastProvenEpoch(&_PDPVerifier.CallOpts, setId) +// Solidity: function getFILUSDPrice() view returns(uint64, int32) +func (_PDPVerifier *PDPVerifierCallerSession) GetFILUSDPrice() (uint64, int32, error) { + return _PDPVerifier.Contract.GetFILUSDPrice(&_PDPVerifier.CallOpts) } -// GetProofSetLeafCount is a free data retrieval call binding the contract method 0x3f84135f. +// GetNextChallengeEpoch is a free data retrieval call binding the contract method 0x6ba4608f. // -// Solidity: function getProofSetLeafCount(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierCaller) GetProofSetLeafCount(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { +// Solidity: function getNextChallengeEpoch(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierCaller) GetNextChallengeEpoch(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getProofSetLeafCount", setId) + err := _PDPVerifier.contract.Call(opts, &out, "getNextChallengeEpoch", setId) if err != nil { return *new(*big.Int), err @@ -872,151 +1015,150 @@ func (_PDPVerifier *PDPVerifierCaller) GetProofSetLeafCount(opts *bind.CallOpts, } -// GetProofSetLeafCount is a free data retrieval call binding the contract method 0x3f84135f. +// GetNextChallengeEpoch is a free data retrieval call binding the contract method 0x6ba4608f. // -// Solidity: function getProofSetLeafCount(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierSession) GetProofSetLeafCount(setId *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetProofSetLeafCount(&_PDPVerifier.CallOpts, setId) +// Solidity: function getNextChallengeEpoch(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierSession) GetNextChallengeEpoch(setId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetNextChallengeEpoch(&_PDPVerifier.CallOpts, setId) } -// GetProofSetLeafCount is a free data retrieval call binding the contract method 0x3f84135f. +// GetNextChallengeEpoch is a free data retrieval call binding the contract method 0x6ba4608f. // -// Solidity: function getProofSetLeafCount(uint256 setId) view returns(uint256) -func (_PDPVerifier *PDPVerifierCallerSession) GetProofSetLeafCount(setId *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetProofSetLeafCount(&_PDPVerifier.CallOpts, setId) +// Solidity: function getNextChallengeEpoch(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierCallerSession) GetNextChallengeEpoch(setId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetNextChallengeEpoch(&_PDPVerifier.CallOpts, setId) } -// GetProofSetListener is a free data retrieval call binding the contract method 0x31601226. +// GetNextDataSetId is a free data retrieval call binding the contract method 0x442cded3. // -// Solidity: function getProofSetListener(uint256 setId) view returns(address) -func (_PDPVerifier *PDPVerifierCaller) GetProofSetListener(opts *bind.CallOpts, setId *big.Int) (common.Address, error) { +// Solidity: function getNextDataSetId() view returns(uint64) +func (_PDPVerifier *PDPVerifierCaller) GetNextDataSetId(opts *bind.CallOpts) (uint64, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getProofSetListener", setId) + err := _PDPVerifier.contract.Call(opts, &out, "getNextDataSetId") if err != nil { - return *new(common.Address), err + return *new(uint64), err } - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) return out0, err } -// GetProofSetListener is a free data retrieval call binding the contract method 0x31601226. +// GetNextDataSetId is a free data retrieval call binding the contract method 0x442cded3. // -// Solidity: function getProofSetListener(uint256 setId) view returns(address) -func (_PDPVerifier *PDPVerifierSession) GetProofSetListener(setId *big.Int) (common.Address, error) { - return _PDPVerifier.Contract.GetProofSetListener(&_PDPVerifier.CallOpts, setId) +// Solidity: function getNextDataSetId() view returns(uint64) +func (_PDPVerifier *PDPVerifierSession) GetNextDataSetId() (uint64, error) { + return _PDPVerifier.Contract.GetNextDataSetId(&_PDPVerifier.CallOpts) } -// GetProofSetListener is a free data retrieval call binding the contract method 0x31601226. +// GetNextDataSetId is a free data retrieval call binding the contract method 0x442cded3. // -// Solidity: function getProofSetListener(uint256 setId) view returns(address) -func (_PDPVerifier *PDPVerifierCallerSession) GetProofSetListener(setId *big.Int) (common.Address, error) { - return _PDPVerifier.Contract.GetProofSetListener(&_PDPVerifier.CallOpts, setId) +// Solidity: function getNextDataSetId() view returns(uint64) +func (_PDPVerifier *PDPVerifierCallerSession) GetNextDataSetId() (uint64, error) { + return _PDPVerifier.Contract.GetNextDataSetId(&_PDPVerifier.CallOpts) } -// GetProofSetOwner is a free data retrieval call binding the contract method 0x4726075b. +// GetNextPieceId is a free data retrieval call binding the contract method 0x1c5ae80f. // -// Solidity: function getProofSetOwner(uint256 setId) view returns(address, address) -func (_PDPVerifier *PDPVerifierCaller) GetProofSetOwner(opts *bind.CallOpts, setId *big.Int) (common.Address, common.Address, error) { +// Solidity: function getNextPieceId(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierCaller) GetNextPieceId(opts *bind.CallOpts, setId *big.Int) (*big.Int, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getProofSetOwner", setId) + err := _PDPVerifier.contract.Call(opts, &out, "getNextPieceId", setId) if err != nil { - return *new(common.Address), *new(common.Address), err + return *new(*big.Int), err } - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - out1 := *abi.ConvertType(out[1], new(common.Address)).(*common.Address) + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - return out0, out1, err + return out0, err } -// GetProofSetOwner is a free data retrieval call binding the contract method 0x4726075b. +// GetNextPieceId is a free data retrieval call binding the contract method 0x1c5ae80f. // -// Solidity: function getProofSetOwner(uint256 setId) view returns(address, address) -func (_PDPVerifier *PDPVerifierSession) GetProofSetOwner(setId *big.Int) (common.Address, common.Address, error) { - return _PDPVerifier.Contract.GetProofSetOwner(&_PDPVerifier.CallOpts, setId) +// Solidity: function getNextPieceId(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierSession) GetNextPieceId(setId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetNextPieceId(&_PDPVerifier.CallOpts, setId) } -// GetProofSetOwner is a free data retrieval call binding the contract method 0x4726075b. +// GetNextPieceId is a free data retrieval call binding the contract method 0x1c5ae80f. // -// Solidity: function getProofSetOwner(uint256 setId) view returns(address, address) -func (_PDPVerifier *PDPVerifierCallerSession) GetProofSetOwner(setId *big.Int) (common.Address, common.Address, error) { - return _PDPVerifier.Contract.GetProofSetOwner(&_PDPVerifier.CallOpts, setId) +// Solidity: function getNextPieceId(uint256 setId) view returns(uint256) +func (_PDPVerifier *PDPVerifierCallerSession) GetNextPieceId(setId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetNextPieceId(&_PDPVerifier.CallOpts, setId) } -// GetRandomness is a free data retrieval call binding the contract method 0x453f4f62. +// GetPieceCid is a free data retrieval call binding the contract method 0x25bbbedf. // -// Solidity: function getRandomness(uint256 epoch) view returns(uint256) -func (_PDPVerifier *PDPVerifierCaller) GetRandomness(opts *bind.CallOpts, epoch *big.Int) (*big.Int, error) { +// Solidity: function getPieceCid(uint256 setId, uint256 pieceId) view returns((bytes)) +func (_PDPVerifier *PDPVerifierCaller) GetPieceCid(opts *bind.CallOpts, setId *big.Int, pieceId *big.Int) (CidsCid, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getRandomness", epoch) + err := _PDPVerifier.contract.Call(opts, &out, "getPieceCid", setId, pieceId) if err != nil { - return *new(*big.Int), err + return *new(CidsCid), err } - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out0 := *abi.ConvertType(out[0], new(CidsCid)).(*CidsCid) return out0, err } -// GetRandomness is a free data retrieval call binding the contract method 0x453f4f62. +// GetPieceCid is a free data retrieval call binding the contract method 0x25bbbedf. // -// Solidity: function getRandomness(uint256 epoch) view returns(uint256) -func (_PDPVerifier *PDPVerifierSession) GetRandomness(epoch *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetRandomness(&_PDPVerifier.CallOpts, epoch) +// Solidity: function getPieceCid(uint256 setId, uint256 pieceId) view returns((bytes)) +func (_PDPVerifier *PDPVerifierSession) GetPieceCid(setId *big.Int, pieceId *big.Int) (CidsCid, error) { + return _PDPVerifier.Contract.GetPieceCid(&_PDPVerifier.CallOpts, setId, pieceId) } -// GetRandomness is a free data retrieval call binding the contract method 0x453f4f62. +// GetPieceCid is a free data retrieval call binding the contract method 0x25bbbedf. // -// Solidity: function getRandomness(uint256 epoch) view returns(uint256) -func (_PDPVerifier *PDPVerifierCallerSession) GetRandomness(epoch *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetRandomness(&_PDPVerifier.CallOpts, epoch) +// Solidity: function getPieceCid(uint256 setId, uint256 pieceId) view returns((bytes)) +func (_PDPVerifier *PDPVerifierCallerSession) GetPieceCid(setId *big.Int, pieceId *big.Int) (CidsCid, error) { + return _PDPVerifier.Contract.GetPieceCid(&_PDPVerifier.CallOpts, setId, pieceId) } -// GetRootCid is a free data retrieval call binding the contract method 0x3b7ae913. +// GetPieceLeafCount is a free data retrieval call binding the contract method 0x0cd7b880. // -// Solidity: function getRootCid(uint256 setId, uint256 rootId) view returns((bytes)) -func (_PDPVerifier *PDPVerifierCaller) GetRootCid(opts *bind.CallOpts, setId *big.Int, rootId *big.Int) (CidsCid, error) { +// Solidity: function getPieceLeafCount(uint256 setId, uint256 pieceId) view returns(uint256) +func (_PDPVerifier *PDPVerifierCaller) GetPieceLeafCount(opts *bind.CallOpts, setId *big.Int, pieceId *big.Int) (*big.Int, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getRootCid", setId, rootId) + err := _PDPVerifier.contract.Call(opts, &out, "getPieceLeafCount", setId, pieceId) if err != nil { - return *new(CidsCid), err + return *new(*big.Int), err } - out0 := *abi.ConvertType(out[0], new(CidsCid)).(*CidsCid) + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) return out0, err } -// GetRootCid is a free data retrieval call binding the contract method 0x3b7ae913. +// GetPieceLeafCount is a free data retrieval call binding the contract method 0x0cd7b880. // -// Solidity: function getRootCid(uint256 setId, uint256 rootId) view returns((bytes)) -func (_PDPVerifier *PDPVerifierSession) GetRootCid(setId *big.Int, rootId *big.Int) (CidsCid, error) { - return _PDPVerifier.Contract.GetRootCid(&_PDPVerifier.CallOpts, setId, rootId) +// Solidity: function getPieceLeafCount(uint256 setId, uint256 pieceId) view returns(uint256) +func (_PDPVerifier *PDPVerifierSession) GetPieceLeafCount(setId *big.Int, pieceId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetPieceLeafCount(&_PDPVerifier.CallOpts, setId, pieceId) } -// GetRootCid is a free data retrieval call binding the contract method 0x3b7ae913. +// GetPieceLeafCount is a free data retrieval call binding the contract method 0x0cd7b880. // -// Solidity: function getRootCid(uint256 setId, uint256 rootId) view returns((bytes)) -func (_PDPVerifier *PDPVerifierCallerSession) GetRootCid(setId *big.Int, rootId *big.Int) (CidsCid, error) { - return _PDPVerifier.Contract.GetRootCid(&_PDPVerifier.CallOpts, setId, rootId) +// Solidity: function getPieceLeafCount(uint256 setId, uint256 pieceId) view returns(uint256) +func (_PDPVerifier *PDPVerifierCallerSession) GetPieceLeafCount(setId *big.Int, pieceId *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetPieceLeafCount(&_PDPVerifier.CallOpts, setId, pieceId) } -// GetRootLeafCount is a free data retrieval call binding the contract method 0x9153e64b. +// GetRandomness is a free data retrieval call binding the contract method 0x453f4f62. // -// Solidity: function getRootLeafCount(uint256 setId, uint256 rootId) view returns(uint256) -func (_PDPVerifier *PDPVerifierCaller) GetRootLeafCount(opts *bind.CallOpts, setId *big.Int, rootId *big.Int) (*big.Int, error) { +// Solidity: function getRandomness(uint256 epoch) view returns(uint256) +func (_PDPVerifier *PDPVerifierCaller) GetRandomness(opts *bind.CallOpts, epoch *big.Int) (*big.Int, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "getRootLeafCount", setId, rootId) + err := _PDPVerifier.contract.Call(opts, &out, "getRandomness", epoch) if err != nil { return *new(*big.Int), err @@ -1028,18 +1170,18 @@ func (_PDPVerifier *PDPVerifierCaller) GetRootLeafCount(opts *bind.CallOpts, set } -// GetRootLeafCount is a free data retrieval call binding the contract method 0x9153e64b. +// GetRandomness is a free data retrieval call binding the contract method 0x453f4f62. // -// Solidity: function getRootLeafCount(uint256 setId, uint256 rootId) view returns(uint256) -func (_PDPVerifier *PDPVerifierSession) GetRootLeafCount(setId *big.Int, rootId *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetRootLeafCount(&_PDPVerifier.CallOpts, setId, rootId) +// Solidity: function getRandomness(uint256 epoch) view returns(uint256) +func (_PDPVerifier *PDPVerifierSession) GetRandomness(epoch *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetRandomness(&_PDPVerifier.CallOpts, epoch) } -// GetRootLeafCount is a free data retrieval call binding the contract method 0x9153e64b. +// GetRandomness is a free data retrieval call binding the contract method 0x453f4f62. // -// Solidity: function getRootLeafCount(uint256 setId, uint256 rootId) view returns(uint256) -func (_PDPVerifier *PDPVerifierCallerSession) GetRootLeafCount(setId *big.Int, rootId *big.Int) (*big.Int, error) { - return _PDPVerifier.Contract.GetRootLeafCount(&_PDPVerifier.CallOpts, setId, rootId) +// Solidity: function getRandomness(uint256 epoch) view returns(uint256) +func (_PDPVerifier *PDPVerifierCallerSession) GetRandomness(epoch *big.Int) (*big.Int, error) { + return _PDPVerifier.Contract.GetRandomness(&_PDPVerifier.CallOpts, epoch) } // GetScheduledRemovals is a free data retrieval call binding the contract method 0x6fa44692. @@ -1104,12 +1246,12 @@ func (_PDPVerifier *PDPVerifierCallerSession) Owner() (common.Address, error) { return _PDPVerifier.Contract.Owner(&_PDPVerifier.CallOpts) } -// ProofSetLive is a free data retrieval call binding the contract method 0xf5cac1ba. +// PieceChallengable is a free data retrieval call binding the contract method 0xdc635266. // -// Solidity: function proofSetLive(uint256 setId) view returns(bool) -func (_PDPVerifier *PDPVerifierCaller) ProofSetLive(opts *bind.CallOpts, setId *big.Int) (bool, error) { +// Solidity: function pieceChallengable(uint256 setId, uint256 pieceId) view returns(bool) +func (_PDPVerifier *PDPVerifierCaller) PieceChallengable(opts *bind.CallOpts, setId *big.Int, pieceId *big.Int) (bool, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "proofSetLive", setId) + err := _PDPVerifier.contract.Call(opts, &out, "pieceChallengable", setId, pieceId) if err != nil { return *new(bool), err @@ -1121,57 +1263,26 @@ func (_PDPVerifier *PDPVerifierCaller) ProofSetLive(opts *bind.CallOpts, setId * } -// ProofSetLive is a free data retrieval call binding the contract method 0xf5cac1ba. -// -// Solidity: function proofSetLive(uint256 setId) view returns(bool) -func (_PDPVerifier *PDPVerifierSession) ProofSetLive(setId *big.Int) (bool, error) { - return _PDPVerifier.Contract.ProofSetLive(&_PDPVerifier.CallOpts, setId) -} - -// ProofSetLive is a free data retrieval call binding the contract method 0xf5cac1ba. -// -// Solidity: function proofSetLive(uint256 setId) view returns(bool) -func (_PDPVerifier *PDPVerifierCallerSession) ProofSetLive(setId *big.Int) (bool, error) { - return _PDPVerifier.Contract.ProofSetLive(&_PDPVerifier.CallOpts, setId) -} - -// ProxiableUUID is a free data retrieval call binding the contract method 0x52d1902d. -// -// Solidity: function proxiableUUID() view returns(bytes32) -func (_PDPVerifier *PDPVerifierCaller) ProxiableUUID(opts *bind.CallOpts) ([32]byte, error) { - var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "proxiableUUID") - - if err != nil { - return *new([32]byte), err - } - - out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) - - return out0, err - -} - -// ProxiableUUID is a free data retrieval call binding the contract method 0x52d1902d. +// PieceChallengable is a free data retrieval call binding the contract method 0xdc635266. // -// Solidity: function proxiableUUID() view returns(bytes32) -func (_PDPVerifier *PDPVerifierSession) ProxiableUUID() ([32]byte, error) { - return _PDPVerifier.Contract.ProxiableUUID(&_PDPVerifier.CallOpts) +// Solidity: function pieceChallengable(uint256 setId, uint256 pieceId) view returns(bool) +func (_PDPVerifier *PDPVerifierSession) PieceChallengable(setId *big.Int, pieceId *big.Int) (bool, error) { + return _PDPVerifier.Contract.PieceChallengable(&_PDPVerifier.CallOpts, setId, pieceId) } -// ProxiableUUID is a free data retrieval call binding the contract method 0x52d1902d. +// PieceChallengable is a free data retrieval call binding the contract method 0xdc635266. // -// Solidity: function proxiableUUID() view returns(bytes32) -func (_PDPVerifier *PDPVerifierCallerSession) ProxiableUUID() ([32]byte, error) { - return _PDPVerifier.Contract.ProxiableUUID(&_PDPVerifier.CallOpts) +// Solidity: function pieceChallengable(uint256 setId, uint256 pieceId) view returns(bool) +func (_PDPVerifier *PDPVerifierCallerSession) PieceChallengable(setId *big.Int, pieceId *big.Int) (bool, error) { + return _PDPVerifier.Contract.PieceChallengable(&_PDPVerifier.CallOpts, setId, pieceId) } -// RootChallengable is a free data retrieval call binding the contract method 0x71cf2a16. +// PieceLive is a free data retrieval call binding the contract method 0x1a271225. // -// Solidity: function rootChallengable(uint256 setId, uint256 rootId) view returns(bool) -func (_PDPVerifier *PDPVerifierCaller) RootChallengable(opts *bind.CallOpts, setId *big.Int, rootId *big.Int) (bool, error) { +// Solidity: function pieceLive(uint256 setId, uint256 pieceId) view returns(bool) +func (_PDPVerifier *PDPVerifierCaller) PieceLive(opts *bind.CallOpts, setId *big.Int, pieceId *big.Int) (bool, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "rootChallengable", setId, rootId) + err := _PDPVerifier.contract.Call(opts, &out, "pieceLive", setId, pieceId) if err != nil { return *new(bool), err @@ -1183,133 +1294,133 @@ func (_PDPVerifier *PDPVerifierCaller) RootChallengable(opts *bind.CallOpts, set } -// RootChallengable is a free data retrieval call binding the contract method 0x71cf2a16. +// PieceLive is a free data retrieval call binding the contract method 0x1a271225. // -// Solidity: function rootChallengable(uint256 setId, uint256 rootId) view returns(bool) -func (_PDPVerifier *PDPVerifierSession) RootChallengable(setId *big.Int, rootId *big.Int) (bool, error) { - return _PDPVerifier.Contract.RootChallengable(&_PDPVerifier.CallOpts, setId, rootId) +// Solidity: function pieceLive(uint256 setId, uint256 pieceId) view returns(bool) +func (_PDPVerifier *PDPVerifierSession) PieceLive(setId *big.Int, pieceId *big.Int) (bool, error) { + return _PDPVerifier.Contract.PieceLive(&_PDPVerifier.CallOpts, setId, pieceId) } -// RootChallengable is a free data retrieval call binding the contract method 0x71cf2a16. +// PieceLive is a free data retrieval call binding the contract method 0x1a271225. // -// Solidity: function rootChallengable(uint256 setId, uint256 rootId) view returns(bool) -func (_PDPVerifier *PDPVerifierCallerSession) RootChallengable(setId *big.Int, rootId *big.Int) (bool, error) { - return _PDPVerifier.Contract.RootChallengable(&_PDPVerifier.CallOpts, setId, rootId) +// Solidity: function pieceLive(uint256 setId, uint256 pieceId) view returns(bool) +func (_PDPVerifier *PDPVerifierCallerSession) PieceLive(setId *big.Int, pieceId *big.Int) (bool, error) { + return _PDPVerifier.Contract.PieceLive(&_PDPVerifier.CallOpts, setId, pieceId) } -// RootLive is a free data retrieval call binding the contract method 0x47331050. +// ProxiableUUID is a free data retrieval call binding the contract method 0x52d1902d. // -// Solidity: function rootLive(uint256 setId, uint256 rootId) view returns(bool) -func (_PDPVerifier *PDPVerifierCaller) RootLive(opts *bind.CallOpts, setId *big.Int, rootId *big.Int) (bool, error) { +// Solidity: function proxiableUUID() view returns(bytes32) +func (_PDPVerifier *PDPVerifierCaller) ProxiableUUID(opts *bind.CallOpts) ([32]byte, error) { var out []interface{} - err := _PDPVerifier.contract.Call(opts, &out, "rootLive", setId, rootId) + err := _PDPVerifier.contract.Call(opts, &out, "proxiableUUID") if err != nil { - return *new(bool), err + return *new([32]byte), err } - out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) return out0, err } -// RootLive is a free data retrieval call binding the contract method 0x47331050. +// ProxiableUUID is a free data retrieval call binding the contract method 0x52d1902d. // -// Solidity: function rootLive(uint256 setId, uint256 rootId) view returns(bool) -func (_PDPVerifier *PDPVerifierSession) RootLive(setId *big.Int, rootId *big.Int) (bool, error) { - return _PDPVerifier.Contract.RootLive(&_PDPVerifier.CallOpts, setId, rootId) +// Solidity: function proxiableUUID() view returns(bytes32) +func (_PDPVerifier *PDPVerifierSession) ProxiableUUID() ([32]byte, error) { + return _PDPVerifier.Contract.ProxiableUUID(&_PDPVerifier.CallOpts) } -// RootLive is a free data retrieval call binding the contract method 0x47331050. +// ProxiableUUID is a free data retrieval call binding the contract method 0x52d1902d. // -// Solidity: function rootLive(uint256 setId, uint256 rootId) view returns(bool) -func (_PDPVerifier *PDPVerifierCallerSession) RootLive(setId *big.Int, rootId *big.Int) (bool, error) { - return _PDPVerifier.Contract.RootLive(&_PDPVerifier.CallOpts, setId, rootId) +// Solidity: function proxiableUUID() view returns(bytes32) +func (_PDPVerifier *PDPVerifierCallerSession) ProxiableUUID() ([32]byte, error) { + return _PDPVerifier.Contract.ProxiableUUID(&_PDPVerifier.CallOpts) } -// AddRoots is a paid mutator transaction binding the contract method 0x11c0ee4a. +// AddPieces is a paid mutator transaction binding the contract method 0x306fc8be. // -// Solidity: function addRoots(uint256 setId, ((bytes),uint256)[] rootData, bytes extraData) returns(uint256) -func (_PDPVerifier *PDPVerifierTransactor) AddRoots(opts *bind.TransactOpts, setId *big.Int, rootData []PDPVerifierRootData, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.contract.Transact(opts, "addRoots", setId, rootData, extraData) +// Solidity: function addPieces(uint256 setId, (bytes)[] pieceData, bytes extraData) returns(uint256) +func (_PDPVerifier *PDPVerifierTransactor) AddPieces(opts *bind.TransactOpts, setId *big.Int, pieceData []CidsCid, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.contract.Transact(opts, "addPieces", setId, pieceData, extraData) } -// AddRoots is a paid mutator transaction binding the contract method 0x11c0ee4a. +// AddPieces is a paid mutator transaction binding the contract method 0x306fc8be. // -// Solidity: function addRoots(uint256 setId, ((bytes),uint256)[] rootData, bytes extraData) returns(uint256) -func (_PDPVerifier *PDPVerifierSession) AddRoots(setId *big.Int, rootData []PDPVerifierRootData, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.Contract.AddRoots(&_PDPVerifier.TransactOpts, setId, rootData, extraData) +// Solidity: function addPieces(uint256 setId, (bytes)[] pieceData, bytes extraData) returns(uint256) +func (_PDPVerifier *PDPVerifierSession) AddPieces(setId *big.Int, pieceData []CidsCid, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.Contract.AddPieces(&_PDPVerifier.TransactOpts, setId, pieceData, extraData) } -// AddRoots is a paid mutator transaction binding the contract method 0x11c0ee4a. +// AddPieces is a paid mutator transaction binding the contract method 0x306fc8be. // -// Solidity: function addRoots(uint256 setId, ((bytes),uint256)[] rootData, bytes extraData) returns(uint256) -func (_PDPVerifier *PDPVerifierTransactorSession) AddRoots(setId *big.Int, rootData []PDPVerifierRootData, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.Contract.AddRoots(&_PDPVerifier.TransactOpts, setId, rootData, extraData) +// Solidity: function addPieces(uint256 setId, (bytes)[] pieceData, bytes extraData) returns(uint256) +func (_PDPVerifier *PDPVerifierTransactorSession) AddPieces(setId *big.Int, pieceData []CidsCid, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.Contract.AddPieces(&_PDPVerifier.TransactOpts, setId, pieceData, extraData) } -// ClaimProofSetOwnership is a paid mutator transaction binding the contract method 0xee3dac65. +// ClaimDataSetStorageProvider is a paid mutator transaction binding the contract method 0xdf0f3248. // -// Solidity: function claimProofSetOwnership(uint256 setId) returns() -func (_PDPVerifier *PDPVerifierTransactor) ClaimProofSetOwnership(opts *bind.TransactOpts, setId *big.Int) (*types.Transaction, error) { - return _PDPVerifier.contract.Transact(opts, "claimProofSetOwnership", setId) +// Solidity: function claimDataSetStorageProvider(uint256 setId, bytes extraData) returns() +func (_PDPVerifier *PDPVerifierTransactor) ClaimDataSetStorageProvider(opts *bind.TransactOpts, setId *big.Int, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.contract.Transact(opts, "claimDataSetStorageProvider", setId, extraData) } -// ClaimProofSetOwnership is a paid mutator transaction binding the contract method 0xee3dac65. +// ClaimDataSetStorageProvider is a paid mutator transaction binding the contract method 0xdf0f3248. // -// Solidity: function claimProofSetOwnership(uint256 setId) returns() -func (_PDPVerifier *PDPVerifierSession) ClaimProofSetOwnership(setId *big.Int) (*types.Transaction, error) { - return _PDPVerifier.Contract.ClaimProofSetOwnership(&_PDPVerifier.TransactOpts, setId) +// Solidity: function claimDataSetStorageProvider(uint256 setId, bytes extraData) returns() +func (_PDPVerifier *PDPVerifierSession) ClaimDataSetStorageProvider(setId *big.Int, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.Contract.ClaimDataSetStorageProvider(&_PDPVerifier.TransactOpts, setId, extraData) } -// ClaimProofSetOwnership is a paid mutator transaction binding the contract method 0xee3dac65. +// ClaimDataSetStorageProvider is a paid mutator transaction binding the contract method 0xdf0f3248. // -// Solidity: function claimProofSetOwnership(uint256 setId) returns() -func (_PDPVerifier *PDPVerifierTransactorSession) ClaimProofSetOwnership(setId *big.Int) (*types.Transaction, error) { - return _PDPVerifier.Contract.ClaimProofSetOwnership(&_PDPVerifier.TransactOpts, setId) +// Solidity: function claimDataSetStorageProvider(uint256 setId, bytes extraData) returns() +func (_PDPVerifier *PDPVerifierTransactorSession) ClaimDataSetStorageProvider(setId *big.Int, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.Contract.ClaimDataSetStorageProvider(&_PDPVerifier.TransactOpts, setId, extraData) } -// CreateProofSet is a paid mutator transaction binding the contract method 0x0a4d7932. +// CreateDataSet is a paid mutator transaction binding the contract method 0xbbae41cb. // -// Solidity: function createProofSet(address listenerAddr, bytes extraData) payable returns(uint256) -func (_PDPVerifier *PDPVerifierTransactor) CreateProofSet(opts *bind.TransactOpts, listenerAddr common.Address, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.contract.Transact(opts, "createProofSet", listenerAddr, extraData) +// Solidity: function createDataSet(address listenerAddr, bytes extraData) payable returns(uint256) +func (_PDPVerifier *PDPVerifierTransactor) CreateDataSet(opts *bind.TransactOpts, listenerAddr common.Address, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.contract.Transact(opts, "createDataSet", listenerAddr, extraData) } -// CreateProofSet is a paid mutator transaction binding the contract method 0x0a4d7932. +// CreateDataSet is a paid mutator transaction binding the contract method 0xbbae41cb. // -// Solidity: function createProofSet(address listenerAddr, bytes extraData) payable returns(uint256) -func (_PDPVerifier *PDPVerifierSession) CreateProofSet(listenerAddr common.Address, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.Contract.CreateProofSet(&_PDPVerifier.TransactOpts, listenerAddr, extraData) +// Solidity: function createDataSet(address listenerAddr, bytes extraData) payable returns(uint256) +func (_PDPVerifier *PDPVerifierSession) CreateDataSet(listenerAddr common.Address, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.Contract.CreateDataSet(&_PDPVerifier.TransactOpts, listenerAddr, extraData) } -// CreateProofSet is a paid mutator transaction binding the contract method 0x0a4d7932. +// CreateDataSet is a paid mutator transaction binding the contract method 0xbbae41cb. // -// Solidity: function createProofSet(address listenerAddr, bytes extraData) payable returns(uint256) -func (_PDPVerifier *PDPVerifierTransactorSession) CreateProofSet(listenerAddr common.Address, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.Contract.CreateProofSet(&_PDPVerifier.TransactOpts, listenerAddr, extraData) +// Solidity: function createDataSet(address listenerAddr, bytes extraData) payable returns(uint256) +func (_PDPVerifier *PDPVerifierTransactorSession) CreateDataSet(listenerAddr common.Address, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.Contract.CreateDataSet(&_PDPVerifier.TransactOpts, listenerAddr, extraData) } -// DeleteProofSet is a paid mutator transaction binding the contract method 0x847d1d06. +// DeleteDataSet is a paid mutator transaction binding the contract method 0x7a1e2990. // -// Solidity: function deleteProofSet(uint256 setId, bytes extraData) returns() -func (_PDPVerifier *PDPVerifierTransactor) DeleteProofSet(opts *bind.TransactOpts, setId *big.Int, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.contract.Transact(opts, "deleteProofSet", setId, extraData) +// Solidity: function deleteDataSet(uint256 setId, bytes extraData) returns() +func (_PDPVerifier *PDPVerifierTransactor) DeleteDataSet(opts *bind.TransactOpts, setId *big.Int, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.contract.Transact(opts, "deleteDataSet", setId, extraData) } -// DeleteProofSet is a paid mutator transaction binding the contract method 0x847d1d06. +// DeleteDataSet is a paid mutator transaction binding the contract method 0x7a1e2990. // -// Solidity: function deleteProofSet(uint256 setId, bytes extraData) returns() -func (_PDPVerifier *PDPVerifierSession) DeleteProofSet(setId *big.Int, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.Contract.DeleteProofSet(&_PDPVerifier.TransactOpts, setId, extraData) +// Solidity: function deleteDataSet(uint256 setId, bytes extraData) returns() +func (_PDPVerifier *PDPVerifierSession) DeleteDataSet(setId *big.Int, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.Contract.DeleteDataSet(&_PDPVerifier.TransactOpts, setId, extraData) } -// DeleteProofSet is a paid mutator transaction binding the contract method 0x847d1d06. +// DeleteDataSet is a paid mutator transaction binding the contract method 0x7a1e2990. // -// Solidity: function deleteProofSet(uint256 setId, bytes extraData) returns() -func (_PDPVerifier *PDPVerifierTransactorSession) DeleteProofSet(setId *big.Int, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.Contract.DeleteProofSet(&_PDPVerifier.TransactOpts, setId, extraData) +// Solidity: function deleteDataSet(uint256 setId, bytes extraData) returns() +func (_PDPVerifier *PDPVerifierTransactorSession) DeleteDataSet(setId *big.Int, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.Contract.DeleteDataSet(&_PDPVerifier.TransactOpts, setId, extraData) } // Initialize is a paid mutator transaction binding the contract method 0xfe4b84df. @@ -1333,6 +1444,27 @@ func (_PDPVerifier *PDPVerifierTransactorSession) Initialize(_challengeFinality return _PDPVerifier.Contract.Initialize(&_PDPVerifier.TransactOpts, _challengeFinality) } +// Migrate is a paid mutator transaction binding the contract method 0x8fd3ab80. +// +// Solidity: function migrate() returns() +func (_PDPVerifier *PDPVerifierTransactor) Migrate(opts *bind.TransactOpts) (*types.Transaction, error) { + return _PDPVerifier.contract.Transact(opts, "migrate") +} + +// Migrate is a paid mutator transaction binding the contract method 0x8fd3ab80. +// +// Solidity: function migrate() returns() +func (_PDPVerifier *PDPVerifierSession) Migrate() (*types.Transaction, error) { + return _PDPVerifier.Contract.Migrate(&_PDPVerifier.TransactOpts) +} + +// Migrate is a paid mutator transaction binding the contract method 0x8fd3ab80. +// +// Solidity: function migrate() returns() +func (_PDPVerifier *PDPVerifierTransactorSession) Migrate() (*types.Transaction, error) { + return _PDPVerifier.Contract.Migrate(&_PDPVerifier.TransactOpts) +} + // NextProvingPeriod is a paid mutator transaction binding the contract method 0x45c0b92d. // // Solidity: function nextProvingPeriod(uint256 setId, uint256 challengeEpoch, bytes extraData) returns() @@ -1354,45 +1486,45 @@ func (_PDPVerifier *PDPVerifierTransactorSession) NextProvingPeriod(setId *big.I return _PDPVerifier.Contract.NextProvingPeriod(&_PDPVerifier.TransactOpts, setId, challengeEpoch, extraData) } -// ProposeProofSetOwner is a paid mutator transaction binding the contract method 0x6cb55c16. +// ProposeDataSetStorageProvider is a paid mutator transaction binding the contract method 0x43186080. // -// Solidity: function proposeProofSetOwner(uint256 setId, address newOwner) returns() -func (_PDPVerifier *PDPVerifierTransactor) ProposeProofSetOwner(opts *bind.TransactOpts, setId *big.Int, newOwner common.Address) (*types.Transaction, error) { - return _PDPVerifier.contract.Transact(opts, "proposeProofSetOwner", setId, newOwner) +// Solidity: function proposeDataSetStorageProvider(uint256 setId, address newStorageProvider) returns() +func (_PDPVerifier *PDPVerifierTransactor) ProposeDataSetStorageProvider(opts *bind.TransactOpts, setId *big.Int, newStorageProvider common.Address) (*types.Transaction, error) { + return _PDPVerifier.contract.Transact(opts, "proposeDataSetStorageProvider", setId, newStorageProvider) } -// ProposeProofSetOwner is a paid mutator transaction binding the contract method 0x6cb55c16. +// ProposeDataSetStorageProvider is a paid mutator transaction binding the contract method 0x43186080. // -// Solidity: function proposeProofSetOwner(uint256 setId, address newOwner) returns() -func (_PDPVerifier *PDPVerifierSession) ProposeProofSetOwner(setId *big.Int, newOwner common.Address) (*types.Transaction, error) { - return _PDPVerifier.Contract.ProposeProofSetOwner(&_PDPVerifier.TransactOpts, setId, newOwner) +// Solidity: function proposeDataSetStorageProvider(uint256 setId, address newStorageProvider) returns() +func (_PDPVerifier *PDPVerifierSession) ProposeDataSetStorageProvider(setId *big.Int, newStorageProvider common.Address) (*types.Transaction, error) { + return _PDPVerifier.Contract.ProposeDataSetStorageProvider(&_PDPVerifier.TransactOpts, setId, newStorageProvider) } -// ProposeProofSetOwner is a paid mutator transaction binding the contract method 0x6cb55c16. +// ProposeDataSetStorageProvider is a paid mutator transaction binding the contract method 0x43186080. // -// Solidity: function proposeProofSetOwner(uint256 setId, address newOwner) returns() -func (_PDPVerifier *PDPVerifierTransactorSession) ProposeProofSetOwner(setId *big.Int, newOwner common.Address) (*types.Transaction, error) { - return _PDPVerifier.Contract.ProposeProofSetOwner(&_PDPVerifier.TransactOpts, setId, newOwner) +// Solidity: function proposeDataSetStorageProvider(uint256 setId, address newStorageProvider) returns() +func (_PDPVerifier *PDPVerifierTransactorSession) ProposeDataSetStorageProvider(setId *big.Int, newStorageProvider common.Address) (*types.Transaction, error) { + return _PDPVerifier.Contract.ProposeDataSetStorageProvider(&_PDPVerifier.TransactOpts, setId, newStorageProvider) } // ProvePossession is a paid mutator transaction binding the contract method 0xf58f952b. // // Solidity: function provePossession(uint256 setId, (bytes32,bytes32[])[] proofs) payable returns() -func (_PDPVerifier *PDPVerifierTransactor) ProvePossession(opts *bind.TransactOpts, setId *big.Int, proofs []PDPVerifierProof) (*types.Transaction, error) { +func (_PDPVerifier *PDPVerifierTransactor) ProvePossession(opts *bind.TransactOpts, setId *big.Int, proofs []IPDPTypesProof) (*types.Transaction, error) { return _PDPVerifier.contract.Transact(opts, "provePossession", setId, proofs) } // ProvePossession is a paid mutator transaction binding the contract method 0xf58f952b. // // Solidity: function provePossession(uint256 setId, (bytes32,bytes32[])[] proofs) payable returns() -func (_PDPVerifier *PDPVerifierSession) ProvePossession(setId *big.Int, proofs []PDPVerifierProof) (*types.Transaction, error) { +func (_PDPVerifier *PDPVerifierSession) ProvePossession(setId *big.Int, proofs []IPDPTypesProof) (*types.Transaction, error) { return _PDPVerifier.Contract.ProvePossession(&_PDPVerifier.TransactOpts, setId, proofs) } // ProvePossession is a paid mutator transaction binding the contract method 0xf58f952b. // // Solidity: function provePossession(uint256 setId, (bytes32,bytes32[])[] proofs) payable returns() -func (_PDPVerifier *PDPVerifierTransactorSession) ProvePossession(setId *big.Int, proofs []PDPVerifierProof) (*types.Transaction, error) { +func (_PDPVerifier *PDPVerifierTransactorSession) ProvePossession(setId *big.Int, proofs []IPDPTypesProof) (*types.Transaction, error) { return _PDPVerifier.Contract.ProvePossession(&_PDPVerifier.TransactOpts, setId, proofs) } @@ -1417,25 +1549,25 @@ func (_PDPVerifier *PDPVerifierTransactorSession) RenounceOwnership() (*types.Tr return _PDPVerifier.Contract.RenounceOwnership(&_PDPVerifier.TransactOpts) } -// ScheduleRemovals is a paid mutator transaction binding the contract method 0x3b68e4e9. +// SchedulePieceDeletions is a paid mutator transaction binding the contract method 0x0c292024. // -// Solidity: function scheduleRemovals(uint256 setId, uint256[] rootIds, bytes extraData) returns() -func (_PDPVerifier *PDPVerifierTransactor) ScheduleRemovals(opts *bind.TransactOpts, setId *big.Int, rootIds []*big.Int, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.contract.Transact(opts, "scheduleRemovals", setId, rootIds, extraData) +// Solidity: function schedulePieceDeletions(uint256 setId, uint256[] pieceIds, bytes extraData) returns() +func (_PDPVerifier *PDPVerifierTransactor) SchedulePieceDeletions(opts *bind.TransactOpts, setId *big.Int, pieceIds []*big.Int, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.contract.Transact(opts, "schedulePieceDeletions", setId, pieceIds, extraData) } -// ScheduleRemovals is a paid mutator transaction binding the contract method 0x3b68e4e9. +// SchedulePieceDeletions is a paid mutator transaction binding the contract method 0x0c292024. // -// Solidity: function scheduleRemovals(uint256 setId, uint256[] rootIds, bytes extraData) returns() -func (_PDPVerifier *PDPVerifierSession) ScheduleRemovals(setId *big.Int, rootIds []*big.Int, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.Contract.ScheduleRemovals(&_PDPVerifier.TransactOpts, setId, rootIds, extraData) +// Solidity: function schedulePieceDeletions(uint256 setId, uint256[] pieceIds, bytes extraData) returns() +func (_PDPVerifier *PDPVerifierSession) SchedulePieceDeletions(setId *big.Int, pieceIds []*big.Int, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.Contract.SchedulePieceDeletions(&_PDPVerifier.TransactOpts, setId, pieceIds, extraData) } -// ScheduleRemovals is a paid mutator transaction binding the contract method 0x3b68e4e9. +// SchedulePieceDeletions is a paid mutator transaction binding the contract method 0x0c292024. // -// Solidity: function scheduleRemovals(uint256 setId, uint256[] rootIds, bytes extraData) returns() -func (_PDPVerifier *PDPVerifierTransactorSession) ScheduleRemovals(setId *big.Int, rootIds []*big.Int, extraData []byte) (*types.Transaction, error) { - return _PDPVerifier.Contract.ScheduleRemovals(&_PDPVerifier.TransactOpts, setId, rootIds, extraData) +// Solidity: function schedulePieceDeletions(uint256 setId, uint256[] pieceIds, bytes extraData) returns() +func (_PDPVerifier *PDPVerifierTransactorSession) SchedulePieceDeletions(setId *big.Int, pieceIds []*big.Int, extraData []byte) (*types.Transaction, error) { + return _PDPVerifier.Contract.SchedulePieceDeletions(&_PDPVerifier.TransactOpts, setId, pieceIds, extraData) } // TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. @@ -1480,9 +1612,9 @@ func (_PDPVerifier *PDPVerifierTransactorSession) UpgradeToAndCall(newImplementa return _PDPVerifier.Contract.UpgradeToAndCall(&_PDPVerifier.TransactOpts, newImplementation, data) } -// PDPVerifierDebugIterator is returned from FilterDebug and is used to iterate over the raw logs and unpacked data for Debug events raised by the PDPVerifier contract. -type PDPVerifierDebugIterator struct { - Event *PDPVerifierDebug // Event containing the contract specifics and raw log +// PDPVerifierContractUpgradedIterator is returned from FilterContractUpgraded and is used to iterate over the raw logs and unpacked data for ContractUpgraded events raised by the PDPVerifier contract. +type PDPVerifierContractUpgradedIterator struct { + Event *PDPVerifierContractUpgraded // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -1496,7 +1628,7 @@ type PDPVerifierDebugIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierDebugIterator) Next() bool { +func (it *PDPVerifierContractUpgradedIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -1505,7 +1637,7 @@ func (it *PDPVerifierDebugIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierDebug) + it.Event = new(PDPVerifierContractUpgraded) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -1520,7 +1652,7 @@ func (it *PDPVerifierDebugIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierDebug) + it.Event = new(PDPVerifierContractUpgraded) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -1536,42 +1668,42 @@ func (it *PDPVerifierDebugIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierDebugIterator) Error() error { +func (it *PDPVerifierContractUpgradedIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierDebugIterator) Close() error { +func (it *PDPVerifierContractUpgradedIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierDebug represents a Debug event raised by the PDPVerifier contract. -type PDPVerifierDebug struct { - Message string - Value *big.Int - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierContractUpgraded represents a ContractUpgraded event raised by the PDPVerifier contract. +type PDPVerifierContractUpgraded struct { + Version string + Implementation common.Address + Raw types.Log // Blockchain specific contextual infos } -// FilterDebug is a free log retrieval operation binding the contract event 0x3c5ad147104e56be34a9176a6692f7df8d2f4b29a5af06bc6b98970d329d6577. +// FilterContractUpgraded is a free log retrieval operation binding the contract event 0x2b51ff7c4cc8e6fe1c72e9d9685b7d2a88a5d82ad3a644afbdceb0272c89c1c3. // -// Solidity: event Debug(string message, uint256 value) -func (_PDPVerifier *PDPVerifierFilterer) FilterDebug(opts *bind.FilterOpts) (*PDPVerifierDebugIterator, error) { +// Solidity: event ContractUpgraded(string version, address implementation) +func (_PDPVerifier *PDPVerifierFilterer) FilterContractUpgraded(opts *bind.FilterOpts) (*PDPVerifierContractUpgradedIterator, error) { - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "Debug") + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "ContractUpgraded") if err != nil { return nil, err } - return &PDPVerifierDebugIterator{contract: _PDPVerifier.contract, event: "Debug", logs: logs, sub: sub}, nil + return &PDPVerifierContractUpgradedIterator{contract: _PDPVerifier.contract, event: "ContractUpgraded", logs: logs, sub: sub}, nil } -// WatchDebug is a free log subscription operation binding the contract event 0x3c5ad147104e56be34a9176a6692f7df8d2f4b29a5af06bc6b98970d329d6577. +// WatchContractUpgraded is a free log subscription operation binding the contract event 0x2b51ff7c4cc8e6fe1c72e9d9685b7d2a88a5d82ad3a644afbdceb0272c89c1c3. // -// Solidity: event Debug(string message, uint256 value) -func (_PDPVerifier *PDPVerifierFilterer) WatchDebug(opts *bind.WatchOpts, sink chan<- *PDPVerifierDebug) (event.Subscription, error) { +// Solidity: event ContractUpgraded(string version, address implementation) +func (_PDPVerifier *PDPVerifierFilterer) WatchContractUpgraded(opts *bind.WatchOpts, sink chan<- *PDPVerifierContractUpgraded) (event.Subscription, error) { - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "Debug") + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "ContractUpgraded") if err != nil { return nil, err } @@ -1581,8 +1713,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchDebug(opts *bind.WatchOpts, sink c select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierDebug) - if err := _PDPVerifier.contract.UnpackLog(event, "Debug", log); err != nil { + event := new(PDPVerifierContractUpgraded) + if err := _PDPVerifier.contract.UnpackLog(event, "ContractUpgraded", log); err != nil { return err } event.Raw = log @@ -1603,21 +1735,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchDebug(opts *bind.WatchOpts, sink c }), nil } -// ParseDebug is a log parse operation binding the contract event 0x3c5ad147104e56be34a9176a6692f7df8d2f4b29a5af06bc6b98970d329d6577. +// ParseContractUpgraded is a log parse operation binding the contract event 0x2b51ff7c4cc8e6fe1c72e9d9685b7d2a88a5d82ad3a644afbdceb0272c89c1c3. // -// Solidity: event Debug(string message, uint256 value) -func (_PDPVerifier *PDPVerifierFilterer) ParseDebug(log types.Log) (*PDPVerifierDebug, error) { - event := new(PDPVerifierDebug) - if err := _PDPVerifier.contract.UnpackLog(event, "Debug", log); err != nil { +// Solidity: event ContractUpgraded(string version, address implementation) +func (_PDPVerifier *PDPVerifierFilterer) ParseContractUpgraded(log types.Log) (*PDPVerifierContractUpgraded, error) { + event := new(PDPVerifierContractUpgraded) + if err := _PDPVerifier.contract.UnpackLog(event, "ContractUpgraded", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierInitializedIterator is returned from FilterInitialized and is used to iterate over the raw logs and unpacked data for Initialized events raised by the PDPVerifier contract. -type PDPVerifierInitializedIterator struct { - Event *PDPVerifierInitialized // Event containing the contract specifics and raw log +// PDPVerifierDataSetCreatedIterator is returned from FilterDataSetCreated and is used to iterate over the raw logs and unpacked data for DataSetCreated events raised by the PDPVerifier contract. +type PDPVerifierDataSetCreatedIterator struct { + Event *PDPVerifierDataSetCreated // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -1631,7 +1763,7 @@ type PDPVerifierInitializedIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierInitializedIterator) Next() bool { +func (it *PDPVerifierDataSetCreatedIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -1640,7 +1772,7 @@ func (it *PDPVerifierInitializedIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierInitialized) + it.Event = new(PDPVerifierDataSetCreated) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -1655,7 +1787,7 @@ func (it *PDPVerifierInitializedIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierInitialized) + it.Event = new(PDPVerifierDataSetCreated) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -1671,41 +1803,60 @@ func (it *PDPVerifierInitializedIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierInitializedIterator) Error() error { +func (it *PDPVerifierDataSetCreatedIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierInitializedIterator) Close() error { +func (it *PDPVerifierDataSetCreatedIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierInitialized represents a Initialized event raised by the PDPVerifier contract. -type PDPVerifierInitialized struct { - Version uint64 - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierDataSetCreated represents a DataSetCreated event raised by the PDPVerifier contract. +type PDPVerifierDataSetCreated struct { + SetId *big.Int + StorageProvider common.Address + Raw types.Log // Blockchain specific contextual infos } -// FilterInitialized is a free log retrieval operation binding the contract event 0xc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2. +// FilterDataSetCreated is a free log retrieval operation binding the contract event 0x11369440e1b7135015c16acb9bc14b55b0f4b23b02010c363d34aec2e5b96281. // -// Solidity: event Initialized(uint64 version) -func (_PDPVerifier *PDPVerifierFilterer) FilterInitialized(opts *bind.FilterOpts) (*PDPVerifierInitializedIterator, error) { +// Solidity: event DataSetCreated(uint256 indexed setId, address indexed storageProvider) +func (_PDPVerifier *PDPVerifierFilterer) FilterDataSetCreated(opts *bind.FilterOpts, setId []*big.Int, storageProvider []common.Address) (*PDPVerifierDataSetCreatedIterator, error) { - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "Initialized") + var setIdRule []interface{} + for _, setIdItem := range setId { + setIdRule = append(setIdRule, setIdItem) + } + var storageProviderRule []interface{} + for _, storageProviderItem := range storageProvider { + storageProviderRule = append(storageProviderRule, storageProviderItem) + } + + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "DataSetCreated", setIdRule, storageProviderRule) if err != nil { return nil, err } - return &PDPVerifierInitializedIterator{contract: _PDPVerifier.contract, event: "Initialized", logs: logs, sub: sub}, nil + return &PDPVerifierDataSetCreatedIterator{contract: _PDPVerifier.contract, event: "DataSetCreated", logs: logs, sub: sub}, nil } -// WatchInitialized is a free log subscription operation binding the contract event 0xc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2. +// WatchDataSetCreated is a free log subscription operation binding the contract event 0x11369440e1b7135015c16acb9bc14b55b0f4b23b02010c363d34aec2e5b96281. // -// Solidity: event Initialized(uint64 version) -func (_PDPVerifier *PDPVerifierFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *PDPVerifierInitialized) (event.Subscription, error) { +// Solidity: event DataSetCreated(uint256 indexed setId, address indexed storageProvider) +func (_PDPVerifier *PDPVerifierFilterer) WatchDataSetCreated(opts *bind.WatchOpts, sink chan<- *PDPVerifierDataSetCreated, setId []*big.Int, storageProvider []common.Address) (event.Subscription, error) { - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "Initialized") + var setIdRule []interface{} + for _, setIdItem := range setId { + setIdRule = append(setIdRule, setIdItem) + } + var storageProviderRule []interface{} + for _, storageProviderItem := range storageProvider { + storageProviderRule = append(storageProviderRule, storageProviderItem) + } + + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "DataSetCreated", setIdRule, storageProviderRule) if err != nil { return nil, err } @@ -1715,8 +1866,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchInitialized(opts *bind.WatchOpts, select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierInitialized) - if err := _PDPVerifier.contract.UnpackLog(event, "Initialized", log); err != nil { + event := new(PDPVerifierDataSetCreated) + if err := _PDPVerifier.contract.UnpackLog(event, "DataSetCreated", log); err != nil { return err } event.Raw = log @@ -1737,21 +1888,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchInitialized(opts *bind.WatchOpts, }), nil } -// ParseInitialized is a log parse operation binding the contract event 0xc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2. +// ParseDataSetCreated is a log parse operation binding the contract event 0x11369440e1b7135015c16acb9bc14b55b0f4b23b02010c363d34aec2e5b96281. // -// Solidity: event Initialized(uint64 version) -func (_PDPVerifier *PDPVerifierFilterer) ParseInitialized(log types.Log) (*PDPVerifierInitialized, error) { - event := new(PDPVerifierInitialized) - if err := _PDPVerifier.contract.UnpackLog(event, "Initialized", log); err != nil { +// Solidity: event DataSetCreated(uint256 indexed setId, address indexed storageProvider) +func (_PDPVerifier *PDPVerifierFilterer) ParseDataSetCreated(log types.Log) (*PDPVerifierDataSetCreated, error) { + event := new(PDPVerifierDataSetCreated) + if err := _PDPVerifier.contract.UnpackLog(event, "DataSetCreated", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierNextProvingPeriodIterator is returned from FilterNextProvingPeriod and is used to iterate over the raw logs and unpacked data for NextProvingPeriod events raised by the PDPVerifier contract. -type PDPVerifierNextProvingPeriodIterator struct { - Event *PDPVerifierNextProvingPeriod // Event containing the contract specifics and raw log +// PDPVerifierDataSetDeletedIterator is returned from FilterDataSetDeleted and is used to iterate over the raw logs and unpacked data for DataSetDeleted events raised by the PDPVerifier contract. +type PDPVerifierDataSetDeletedIterator struct { + Event *PDPVerifierDataSetDeleted // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -1765,7 +1916,7 @@ type PDPVerifierNextProvingPeriodIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierNextProvingPeriodIterator) Next() bool { +func (it *PDPVerifierDataSetDeletedIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -1774,7 +1925,7 @@ func (it *PDPVerifierNextProvingPeriodIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierNextProvingPeriod) + it.Event = new(PDPVerifierDataSetDeleted) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -1789,7 +1940,7 @@ func (it *PDPVerifierNextProvingPeriodIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierNextProvingPeriod) + it.Event = new(PDPVerifierDataSetDeleted) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -1805,53 +1956,52 @@ func (it *PDPVerifierNextProvingPeriodIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierNextProvingPeriodIterator) Error() error { +func (it *PDPVerifierDataSetDeletedIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierNextProvingPeriodIterator) Close() error { +func (it *PDPVerifierDataSetDeletedIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierNextProvingPeriod represents a NextProvingPeriod event raised by the PDPVerifier contract. -type PDPVerifierNextProvingPeriod struct { - SetId *big.Int - ChallengeEpoch *big.Int - LeafCount *big.Int - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierDataSetDeleted represents a DataSetDeleted event raised by the PDPVerifier contract. +type PDPVerifierDataSetDeleted struct { + SetId *big.Int + DeletedLeafCount *big.Int + Raw types.Log // Blockchain specific contextual infos } -// FilterNextProvingPeriod is a free log retrieval operation binding the contract event 0xc099ffec4e3e773644a4d1dda368c46af853a0eeb15babde217f53a657396e1e. +// FilterDataSetDeleted is a free log retrieval operation binding the contract event 0x14eeeef7679fcb051c6572811f61c07bedccd0f1cfc1f9b79b23e47c5c52aeb7. // -// Solidity: event NextProvingPeriod(uint256 indexed setId, uint256 challengeEpoch, uint256 leafCount) -func (_PDPVerifier *PDPVerifierFilterer) FilterNextProvingPeriod(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierNextProvingPeriodIterator, error) { +// Solidity: event DataSetDeleted(uint256 indexed setId, uint256 deletedLeafCount) +func (_PDPVerifier *PDPVerifierFilterer) FilterDataSetDeleted(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierDataSetDeletedIterator, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "NextProvingPeriod", setIdRule) + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "DataSetDeleted", setIdRule) if err != nil { return nil, err } - return &PDPVerifierNextProvingPeriodIterator{contract: _PDPVerifier.contract, event: "NextProvingPeriod", logs: logs, sub: sub}, nil + return &PDPVerifierDataSetDeletedIterator{contract: _PDPVerifier.contract, event: "DataSetDeleted", logs: logs, sub: sub}, nil } -// WatchNextProvingPeriod is a free log subscription operation binding the contract event 0xc099ffec4e3e773644a4d1dda368c46af853a0eeb15babde217f53a657396e1e. +// WatchDataSetDeleted is a free log subscription operation binding the contract event 0x14eeeef7679fcb051c6572811f61c07bedccd0f1cfc1f9b79b23e47c5c52aeb7. // -// Solidity: event NextProvingPeriod(uint256 indexed setId, uint256 challengeEpoch, uint256 leafCount) -func (_PDPVerifier *PDPVerifierFilterer) WatchNextProvingPeriod(opts *bind.WatchOpts, sink chan<- *PDPVerifierNextProvingPeriod, setId []*big.Int) (event.Subscription, error) { +// Solidity: event DataSetDeleted(uint256 indexed setId, uint256 deletedLeafCount) +func (_PDPVerifier *PDPVerifierFilterer) WatchDataSetDeleted(opts *bind.WatchOpts, sink chan<- *PDPVerifierDataSetDeleted, setId []*big.Int) (event.Subscription, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "NextProvingPeriod", setIdRule) + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "DataSetDeleted", setIdRule) if err != nil { return nil, err } @@ -1861,8 +2011,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchNextProvingPeriod(opts *bind.Watch select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierNextProvingPeriod) - if err := _PDPVerifier.contract.UnpackLog(event, "NextProvingPeriod", log); err != nil { + event := new(PDPVerifierDataSetDeleted) + if err := _PDPVerifier.contract.UnpackLog(event, "DataSetDeleted", log); err != nil { return err } event.Raw = log @@ -1883,21 +2033,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchNextProvingPeriod(opts *bind.Watch }), nil } -// ParseNextProvingPeriod is a log parse operation binding the contract event 0xc099ffec4e3e773644a4d1dda368c46af853a0eeb15babde217f53a657396e1e. +// ParseDataSetDeleted is a log parse operation binding the contract event 0x14eeeef7679fcb051c6572811f61c07bedccd0f1cfc1f9b79b23e47c5c52aeb7. // -// Solidity: event NextProvingPeriod(uint256 indexed setId, uint256 challengeEpoch, uint256 leafCount) -func (_PDPVerifier *PDPVerifierFilterer) ParseNextProvingPeriod(log types.Log) (*PDPVerifierNextProvingPeriod, error) { - event := new(PDPVerifierNextProvingPeriod) - if err := _PDPVerifier.contract.UnpackLog(event, "NextProvingPeriod", log); err != nil { +// Solidity: event DataSetDeleted(uint256 indexed setId, uint256 deletedLeafCount) +func (_PDPVerifier *PDPVerifierFilterer) ParseDataSetDeleted(log types.Log) (*PDPVerifierDataSetDeleted, error) { + event := new(PDPVerifierDataSetDeleted) + if err := _PDPVerifier.contract.UnpackLog(event, "DataSetDeleted", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierOwnershipTransferredIterator is returned from FilterOwnershipTransferred and is used to iterate over the raw logs and unpacked data for OwnershipTransferred events raised by the PDPVerifier contract. -type PDPVerifierOwnershipTransferredIterator struct { - Event *PDPVerifierOwnershipTransferred // Event containing the contract specifics and raw log +// PDPVerifierDataSetEmptyIterator is returned from FilterDataSetEmpty and is used to iterate over the raw logs and unpacked data for DataSetEmpty events raised by the PDPVerifier contract. +type PDPVerifierDataSetEmptyIterator struct { + Event *PDPVerifierDataSetEmpty // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -1911,7 +2061,7 @@ type PDPVerifierOwnershipTransferredIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierOwnershipTransferredIterator) Next() bool { +func (it *PDPVerifierDataSetEmptyIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -1920,7 +2070,7 @@ func (it *PDPVerifierOwnershipTransferredIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierOwnershipTransferred) + it.Event = new(PDPVerifierDataSetEmpty) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -1935,7 +2085,7 @@ func (it *PDPVerifierOwnershipTransferredIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierOwnershipTransferred) + it.Event = new(PDPVerifierDataSetEmpty) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -1951,60 +2101,51 @@ func (it *PDPVerifierOwnershipTransferredIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierOwnershipTransferredIterator) Error() error { +func (it *PDPVerifierDataSetEmptyIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierOwnershipTransferredIterator) Close() error { +func (it *PDPVerifierDataSetEmptyIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierOwnershipTransferred represents a OwnershipTransferred event raised by the PDPVerifier contract. -type PDPVerifierOwnershipTransferred struct { - PreviousOwner common.Address - NewOwner common.Address - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierDataSetEmpty represents a DataSetEmpty event raised by the PDPVerifier contract. +type PDPVerifierDataSetEmpty struct { + SetId *big.Int + Raw types.Log // Blockchain specific contextual infos } -// FilterOwnershipTransferred is a free log retrieval operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. +// FilterDataSetEmpty is a free log retrieval operation binding the contract event 0x02a8400fc343f45098cb00c3a6ea694174771939a5503f663e0ff6f4eb7c2842. // -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_PDPVerifier *PDPVerifierFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*PDPVerifierOwnershipTransferredIterator, error) { +// Solidity: event DataSetEmpty(uint256 indexed setId) +func (_PDPVerifier *PDPVerifierFilterer) FilterDataSetEmpty(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierDataSetEmptyIterator, error) { - var previousOwnerRule []interface{} - for _, previousOwnerItem := range previousOwner { - previousOwnerRule = append(previousOwnerRule, previousOwnerItem) - } - var newOwnerRule []interface{} - for _, newOwnerItem := range newOwner { - newOwnerRule = append(newOwnerRule, newOwnerItem) + var setIdRule []interface{} + for _, setIdItem := range setId { + setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "DataSetEmpty", setIdRule) if err != nil { return nil, err } - return &PDPVerifierOwnershipTransferredIterator{contract: _PDPVerifier.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil + return &PDPVerifierDataSetEmptyIterator{contract: _PDPVerifier.contract, event: "DataSetEmpty", logs: logs, sub: sub}, nil } -// WatchOwnershipTransferred is a free log subscription operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. +// WatchDataSetEmpty is a free log subscription operation binding the contract event 0x02a8400fc343f45098cb00c3a6ea694174771939a5503f663e0ff6f4eb7c2842. // -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_PDPVerifier *PDPVerifierFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *PDPVerifierOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { +// Solidity: event DataSetEmpty(uint256 indexed setId) +func (_PDPVerifier *PDPVerifierFilterer) WatchDataSetEmpty(opts *bind.WatchOpts, sink chan<- *PDPVerifierDataSetEmpty, setId []*big.Int) (event.Subscription, error) { - var previousOwnerRule []interface{} - for _, previousOwnerItem := range previousOwner { - previousOwnerRule = append(previousOwnerRule, previousOwnerItem) - } - var newOwnerRule []interface{} - for _, newOwnerItem := range newOwner { - newOwnerRule = append(newOwnerRule, newOwnerItem) + var setIdRule []interface{} + for _, setIdItem := range setId { + setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "DataSetEmpty", setIdRule) if err != nil { return nil, err } @@ -2014,8 +2155,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchOwnershipTransferred(opts *bind.Wa select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierOwnershipTransferred) - if err := _PDPVerifier.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + event := new(PDPVerifierDataSetEmpty) + if err := _PDPVerifier.contract.UnpackLog(event, "DataSetEmpty", log); err != nil { return err } event.Raw = log @@ -2036,21 +2177,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchOwnershipTransferred(opts *bind.Wa }), nil } -// ParseOwnershipTransferred is a log parse operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. +// ParseDataSetEmpty is a log parse operation binding the contract event 0x02a8400fc343f45098cb00c3a6ea694174771939a5503f663e0ff6f4eb7c2842. // -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_PDPVerifier *PDPVerifierFilterer) ParseOwnershipTransferred(log types.Log) (*PDPVerifierOwnershipTransferred, error) { - event := new(PDPVerifierOwnershipTransferred) - if err := _PDPVerifier.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { +// Solidity: event DataSetEmpty(uint256 indexed setId) +func (_PDPVerifier *PDPVerifierFilterer) ParseDataSetEmpty(log types.Log) (*PDPVerifierDataSetEmpty, error) { + event := new(PDPVerifierDataSetEmpty) + if err := _PDPVerifier.contract.UnpackLog(event, "DataSetEmpty", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierPossessionProvenIterator is returned from FilterPossessionProven and is used to iterate over the raw logs and unpacked data for PossessionProven events raised by the PDPVerifier contract. -type PDPVerifierPossessionProvenIterator struct { - Event *PDPVerifierPossessionProven // Event containing the contract specifics and raw log +// PDPVerifierInitializedIterator is returned from FilterInitialized and is used to iterate over the raw logs and unpacked data for Initialized events raised by the PDPVerifier contract. +type PDPVerifierInitializedIterator struct { + Event *PDPVerifierInitialized // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -2064,7 +2205,7 @@ type PDPVerifierPossessionProvenIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierPossessionProvenIterator) Next() bool { +func (it *PDPVerifierInitializedIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -2073,7 +2214,7 @@ func (it *PDPVerifierPossessionProvenIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierPossessionProven) + it.Event = new(PDPVerifierInitialized) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2088,7 +2229,7 @@ func (it *PDPVerifierPossessionProvenIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierPossessionProven) + it.Event = new(PDPVerifierInitialized) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2104,52 +2245,41 @@ func (it *PDPVerifierPossessionProvenIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierPossessionProvenIterator) Error() error { +func (it *PDPVerifierInitializedIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierPossessionProvenIterator) Close() error { +func (it *PDPVerifierInitializedIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierPossessionProven represents a PossessionProven event raised by the PDPVerifier contract. -type PDPVerifierPossessionProven struct { - SetId *big.Int - Challenges []PDPVerifierRootIdAndOffset - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierInitialized represents a Initialized event raised by the PDPVerifier contract. +type PDPVerifierInitialized struct { + Version uint64 + Raw types.Log // Blockchain specific contextual infos } -// FilterPossessionProven is a free log retrieval operation binding the contract event 0x1acf7df9f0c1b0208c23be6178950c0273f89b766805a2c0bd1e53d25c700e50. +// FilterInitialized is a free log retrieval operation binding the contract event 0xc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2. // -// Solidity: event PossessionProven(uint256 indexed setId, (uint256,uint256)[] challenges) -func (_PDPVerifier *PDPVerifierFilterer) FilterPossessionProven(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierPossessionProvenIterator, error) { - - var setIdRule []interface{} - for _, setIdItem := range setId { - setIdRule = append(setIdRule, setIdItem) - } +// Solidity: event Initialized(uint64 version) +func (_PDPVerifier *PDPVerifierFilterer) FilterInitialized(opts *bind.FilterOpts) (*PDPVerifierInitializedIterator, error) { - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "PossessionProven", setIdRule) + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "Initialized") if err != nil { return nil, err } - return &PDPVerifierPossessionProvenIterator{contract: _PDPVerifier.contract, event: "PossessionProven", logs: logs, sub: sub}, nil + return &PDPVerifierInitializedIterator{contract: _PDPVerifier.contract, event: "Initialized", logs: logs, sub: sub}, nil } -// WatchPossessionProven is a free log subscription operation binding the contract event 0x1acf7df9f0c1b0208c23be6178950c0273f89b766805a2c0bd1e53d25c700e50. +// WatchInitialized is a free log subscription operation binding the contract event 0xc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2. // -// Solidity: event PossessionProven(uint256 indexed setId, (uint256,uint256)[] challenges) -func (_PDPVerifier *PDPVerifierFilterer) WatchPossessionProven(opts *bind.WatchOpts, sink chan<- *PDPVerifierPossessionProven, setId []*big.Int) (event.Subscription, error) { - - var setIdRule []interface{} - for _, setIdItem := range setId { - setIdRule = append(setIdRule, setIdItem) - } +// Solidity: event Initialized(uint64 version) +func (_PDPVerifier *PDPVerifierFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *PDPVerifierInitialized) (event.Subscription, error) { - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "PossessionProven", setIdRule) + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "Initialized") if err != nil { return nil, err } @@ -2159,8 +2289,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchPossessionProven(opts *bind.WatchO select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierPossessionProven) - if err := _PDPVerifier.contract.UnpackLog(event, "PossessionProven", log); err != nil { + event := new(PDPVerifierInitialized) + if err := _PDPVerifier.contract.UnpackLog(event, "Initialized", log); err != nil { return err } event.Raw = log @@ -2181,21 +2311,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchPossessionProven(opts *bind.WatchO }), nil } -// ParsePossessionProven is a log parse operation binding the contract event 0x1acf7df9f0c1b0208c23be6178950c0273f89b766805a2c0bd1e53d25c700e50. +// ParseInitialized is a log parse operation binding the contract event 0xc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2. // -// Solidity: event PossessionProven(uint256 indexed setId, (uint256,uint256)[] challenges) -func (_PDPVerifier *PDPVerifierFilterer) ParsePossessionProven(log types.Log) (*PDPVerifierPossessionProven, error) { - event := new(PDPVerifierPossessionProven) - if err := _PDPVerifier.contract.UnpackLog(event, "PossessionProven", log); err != nil { +// Solidity: event Initialized(uint64 version) +func (_PDPVerifier *PDPVerifierFilterer) ParseInitialized(log types.Log) (*PDPVerifierInitialized, error) { + event := new(PDPVerifierInitialized) + if err := _PDPVerifier.contract.UnpackLog(event, "Initialized", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierProofFeePaidIterator is returned from FilterProofFeePaid and is used to iterate over the raw logs and unpacked data for ProofFeePaid events raised by the PDPVerifier contract. -type PDPVerifierProofFeePaidIterator struct { - Event *PDPVerifierProofFeePaid // Event containing the contract specifics and raw log +// PDPVerifierNextProvingPeriodIterator is returned from FilterNextProvingPeriod and is used to iterate over the raw logs and unpacked data for NextProvingPeriod events raised by the PDPVerifier contract. +type PDPVerifierNextProvingPeriodIterator struct { + Event *PDPVerifierNextProvingPeriod // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -2209,7 +2339,7 @@ type PDPVerifierProofFeePaidIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierProofFeePaidIterator) Next() bool { +func (it *PDPVerifierNextProvingPeriodIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -2218,7 +2348,7 @@ func (it *PDPVerifierProofFeePaidIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierProofFeePaid) + it.Event = new(PDPVerifierNextProvingPeriod) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2233,7 +2363,7 @@ func (it *PDPVerifierProofFeePaidIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierProofFeePaid) + it.Event = new(PDPVerifierNextProvingPeriod) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2249,54 +2379,53 @@ func (it *PDPVerifierProofFeePaidIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierProofFeePaidIterator) Error() error { +func (it *PDPVerifierNextProvingPeriodIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierProofFeePaidIterator) Close() error { +func (it *PDPVerifierNextProvingPeriodIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierProofFeePaid represents a ProofFeePaid event raised by the PDPVerifier contract. -type PDPVerifierProofFeePaid struct { - SetId *big.Int - Fee *big.Int - Price uint64 - Expo int32 - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierNextProvingPeriod represents a NextProvingPeriod event raised by the PDPVerifier contract. +type PDPVerifierNextProvingPeriod struct { + SetId *big.Int + ChallengeEpoch *big.Int + LeafCount *big.Int + Raw types.Log // Blockchain specific contextual infos } -// FilterProofFeePaid is a free log retrieval operation binding the contract event 0x928bbf5188022bf8b9a0e59f5e81e179d0a4c729bdba2856ac971af2063fbf2b. +// FilterNextProvingPeriod is a free log retrieval operation binding the contract event 0xc099ffec4e3e773644a4d1dda368c46af853a0eeb15babde217f53a657396e1e. // -// Solidity: event ProofFeePaid(uint256 indexed setId, uint256 fee, uint64 price, int32 expo) -func (_PDPVerifier *PDPVerifierFilterer) FilterProofFeePaid(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierProofFeePaidIterator, error) { +// Solidity: event NextProvingPeriod(uint256 indexed setId, uint256 challengeEpoch, uint256 leafCount) +func (_PDPVerifier *PDPVerifierFilterer) FilterNextProvingPeriod(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierNextProvingPeriodIterator, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "ProofFeePaid", setIdRule) + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "NextProvingPeriod", setIdRule) if err != nil { return nil, err } - return &PDPVerifierProofFeePaidIterator{contract: _PDPVerifier.contract, event: "ProofFeePaid", logs: logs, sub: sub}, nil + return &PDPVerifierNextProvingPeriodIterator{contract: _PDPVerifier.contract, event: "NextProvingPeriod", logs: logs, sub: sub}, nil } -// WatchProofFeePaid is a free log subscription operation binding the contract event 0x928bbf5188022bf8b9a0e59f5e81e179d0a4c729bdba2856ac971af2063fbf2b. +// WatchNextProvingPeriod is a free log subscription operation binding the contract event 0xc099ffec4e3e773644a4d1dda368c46af853a0eeb15babde217f53a657396e1e. // -// Solidity: event ProofFeePaid(uint256 indexed setId, uint256 fee, uint64 price, int32 expo) -func (_PDPVerifier *PDPVerifierFilterer) WatchProofFeePaid(opts *bind.WatchOpts, sink chan<- *PDPVerifierProofFeePaid, setId []*big.Int) (event.Subscription, error) { +// Solidity: event NextProvingPeriod(uint256 indexed setId, uint256 challengeEpoch, uint256 leafCount) +func (_PDPVerifier *PDPVerifierFilterer) WatchNextProvingPeriod(opts *bind.WatchOpts, sink chan<- *PDPVerifierNextProvingPeriod, setId []*big.Int) (event.Subscription, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "ProofFeePaid", setIdRule) + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "NextProvingPeriod", setIdRule) if err != nil { return nil, err } @@ -2306,8 +2435,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchProofFeePaid(opts *bind.WatchOpts, select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierProofFeePaid) - if err := _PDPVerifier.contract.UnpackLog(event, "ProofFeePaid", log); err != nil { + event := new(PDPVerifierNextProvingPeriod) + if err := _PDPVerifier.contract.UnpackLog(event, "NextProvingPeriod", log); err != nil { return err } event.Raw = log @@ -2328,21 +2457,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchProofFeePaid(opts *bind.WatchOpts, }), nil } -// ParseProofFeePaid is a log parse operation binding the contract event 0x928bbf5188022bf8b9a0e59f5e81e179d0a4c729bdba2856ac971af2063fbf2b. +// ParseNextProvingPeriod is a log parse operation binding the contract event 0xc099ffec4e3e773644a4d1dda368c46af853a0eeb15babde217f53a657396e1e. // -// Solidity: event ProofFeePaid(uint256 indexed setId, uint256 fee, uint64 price, int32 expo) -func (_PDPVerifier *PDPVerifierFilterer) ParseProofFeePaid(log types.Log) (*PDPVerifierProofFeePaid, error) { - event := new(PDPVerifierProofFeePaid) - if err := _PDPVerifier.contract.UnpackLog(event, "ProofFeePaid", log); err != nil { +// Solidity: event NextProvingPeriod(uint256 indexed setId, uint256 challengeEpoch, uint256 leafCount) +func (_PDPVerifier *PDPVerifierFilterer) ParseNextProvingPeriod(log types.Log) (*PDPVerifierNextProvingPeriod, error) { + event := new(PDPVerifierNextProvingPeriod) + if err := _PDPVerifier.contract.UnpackLog(event, "NextProvingPeriod", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierProofSetCreatedIterator is returned from FilterProofSetCreated and is used to iterate over the raw logs and unpacked data for ProofSetCreated events raised by the PDPVerifier contract. -type PDPVerifierProofSetCreatedIterator struct { - Event *PDPVerifierProofSetCreated // Event containing the contract specifics and raw log +// PDPVerifierOwnershipTransferredIterator is returned from FilterOwnershipTransferred and is used to iterate over the raw logs and unpacked data for OwnershipTransferred events raised by the PDPVerifier contract. +type PDPVerifierOwnershipTransferredIterator struct { + Event *PDPVerifierOwnershipTransferred // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -2356,7 +2485,7 @@ type PDPVerifierProofSetCreatedIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierProofSetCreatedIterator) Next() bool { +func (it *PDPVerifierOwnershipTransferredIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -2365,7 +2494,7 @@ func (it *PDPVerifierProofSetCreatedIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierProofSetCreated) + it.Event = new(PDPVerifierOwnershipTransferred) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2380,7 +2509,7 @@ func (it *PDPVerifierProofSetCreatedIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierProofSetCreated) + it.Event = new(PDPVerifierOwnershipTransferred) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2396,60 +2525,60 @@ func (it *PDPVerifierProofSetCreatedIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierProofSetCreatedIterator) Error() error { +func (it *PDPVerifierOwnershipTransferredIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierProofSetCreatedIterator) Close() error { +func (it *PDPVerifierOwnershipTransferredIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierProofSetCreated represents a ProofSetCreated event raised by the PDPVerifier contract. -type PDPVerifierProofSetCreated struct { - SetId *big.Int - Owner common.Address - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierOwnershipTransferred represents a OwnershipTransferred event raised by the PDPVerifier contract. +type PDPVerifierOwnershipTransferred struct { + PreviousOwner common.Address + NewOwner common.Address + Raw types.Log // Blockchain specific contextual infos } -// FilterProofSetCreated is a free log retrieval operation binding the contract event 0x017f0b33d96e8f9968590172013032c2346cf047787a5e17a44b0a1bb3cd0f01. +// FilterOwnershipTransferred is a free log retrieval operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. // -// Solidity: event ProofSetCreated(uint256 indexed setId, address indexed owner) -func (_PDPVerifier *PDPVerifierFilterer) FilterProofSetCreated(opts *bind.FilterOpts, setId []*big.Int, owner []common.Address) (*PDPVerifierProofSetCreatedIterator, error) { +// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) +func (_PDPVerifier *PDPVerifierFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*PDPVerifierOwnershipTransferredIterator, error) { - var setIdRule []interface{} - for _, setIdItem := range setId { - setIdRule = append(setIdRule, setIdItem) + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) } - var ownerRule []interface{} - for _, ownerItem := range owner { - ownerRule = append(ownerRule, ownerItem) + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) } - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "ProofSetCreated", setIdRule, ownerRule) + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) if err != nil { return nil, err } - return &PDPVerifierProofSetCreatedIterator{contract: _PDPVerifier.contract, event: "ProofSetCreated", logs: logs, sub: sub}, nil + return &PDPVerifierOwnershipTransferredIterator{contract: _PDPVerifier.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil } -// WatchProofSetCreated is a free log subscription operation binding the contract event 0x017f0b33d96e8f9968590172013032c2346cf047787a5e17a44b0a1bb3cd0f01. +// WatchOwnershipTransferred is a free log subscription operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. // -// Solidity: event ProofSetCreated(uint256 indexed setId, address indexed owner) -func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetCreated(opts *bind.WatchOpts, sink chan<- *PDPVerifierProofSetCreated, setId []*big.Int, owner []common.Address) (event.Subscription, error) { +// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) +func (_PDPVerifier *PDPVerifierFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *PDPVerifierOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { - var setIdRule []interface{} - for _, setIdItem := range setId { - setIdRule = append(setIdRule, setIdItem) + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) } - var ownerRule []interface{} - for _, ownerItem := range owner { - ownerRule = append(ownerRule, ownerItem) + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) } - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "ProofSetCreated", setIdRule, ownerRule) + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) if err != nil { return nil, err } @@ -2459,8 +2588,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetCreated(opts *bind.WatchOp select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierProofSetCreated) - if err := _PDPVerifier.contract.UnpackLog(event, "ProofSetCreated", log); err != nil { + event := new(PDPVerifierOwnershipTransferred) + if err := _PDPVerifier.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { return err } event.Raw = log @@ -2481,21 +2610,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetCreated(opts *bind.WatchOp }), nil } -// ParseProofSetCreated is a log parse operation binding the contract event 0x017f0b33d96e8f9968590172013032c2346cf047787a5e17a44b0a1bb3cd0f01. +// ParseOwnershipTransferred is a log parse operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. // -// Solidity: event ProofSetCreated(uint256 indexed setId, address indexed owner) -func (_PDPVerifier *PDPVerifierFilterer) ParseProofSetCreated(log types.Log) (*PDPVerifierProofSetCreated, error) { - event := new(PDPVerifierProofSetCreated) - if err := _PDPVerifier.contract.UnpackLog(event, "ProofSetCreated", log); err != nil { +// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) +func (_PDPVerifier *PDPVerifierFilterer) ParseOwnershipTransferred(log types.Log) (*PDPVerifierOwnershipTransferred, error) { + event := new(PDPVerifierOwnershipTransferred) + if err := _PDPVerifier.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierProofSetDeletedIterator is returned from FilterProofSetDeleted and is used to iterate over the raw logs and unpacked data for ProofSetDeleted events raised by the PDPVerifier contract. -type PDPVerifierProofSetDeletedIterator struct { - Event *PDPVerifierProofSetDeleted // Event containing the contract specifics and raw log +// PDPVerifierPiecesAddedIterator is returned from FilterPiecesAdded and is used to iterate over the raw logs and unpacked data for PiecesAdded events raised by the PDPVerifier contract. +type PDPVerifierPiecesAddedIterator struct { + Event *PDPVerifierPiecesAdded // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -2509,7 +2638,7 @@ type PDPVerifierProofSetDeletedIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierProofSetDeletedIterator) Next() bool { +func (it *PDPVerifierPiecesAddedIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -2518,7 +2647,7 @@ func (it *PDPVerifierProofSetDeletedIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierProofSetDeleted) + it.Event = new(PDPVerifierPiecesAdded) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2533,7 +2662,7 @@ func (it *PDPVerifierProofSetDeletedIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierProofSetDeleted) + it.Event = new(PDPVerifierPiecesAdded) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2549,52 +2678,53 @@ func (it *PDPVerifierProofSetDeletedIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierProofSetDeletedIterator) Error() error { +func (it *PDPVerifierPiecesAddedIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierProofSetDeletedIterator) Close() error { +func (it *PDPVerifierPiecesAddedIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierProofSetDeleted represents a ProofSetDeleted event raised by the PDPVerifier contract. -type PDPVerifierProofSetDeleted struct { - SetId *big.Int - DeletedLeafCount *big.Int - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierPiecesAdded represents a PiecesAdded event raised by the PDPVerifier contract. +type PDPVerifierPiecesAdded struct { + SetId *big.Int + PieceIds []*big.Int + PieceCids []CidsCid + Raw types.Log // Blockchain specific contextual infos } -// FilterProofSetDeleted is a free log retrieval operation binding the contract event 0x589e9a441b5bddda77c4ab647b0108764a9cc1a7f655aa9b7bc50b8bdfab8673. +// FilterPiecesAdded is a free log retrieval operation binding the contract event 0x396df50222a87662e94bb7d173792d5e61fe0b193b6ccf791f7ce433f0b28207. // -// Solidity: event ProofSetDeleted(uint256 indexed setId, uint256 deletedLeafCount) -func (_PDPVerifier *PDPVerifierFilterer) FilterProofSetDeleted(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierProofSetDeletedIterator, error) { +// Solidity: event PiecesAdded(uint256 indexed setId, uint256[] pieceIds, (bytes)[] pieceCids) +func (_PDPVerifier *PDPVerifierFilterer) FilterPiecesAdded(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierPiecesAddedIterator, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "ProofSetDeleted", setIdRule) + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "PiecesAdded", setIdRule) if err != nil { return nil, err } - return &PDPVerifierProofSetDeletedIterator{contract: _PDPVerifier.contract, event: "ProofSetDeleted", logs: logs, sub: sub}, nil + return &PDPVerifierPiecesAddedIterator{contract: _PDPVerifier.contract, event: "PiecesAdded", logs: logs, sub: sub}, nil } -// WatchProofSetDeleted is a free log subscription operation binding the contract event 0x589e9a441b5bddda77c4ab647b0108764a9cc1a7f655aa9b7bc50b8bdfab8673. +// WatchPiecesAdded is a free log subscription operation binding the contract event 0x396df50222a87662e94bb7d173792d5e61fe0b193b6ccf791f7ce433f0b28207. // -// Solidity: event ProofSetDeleted(uint256 indexed setId, uint256 deletedLeafCount) -func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetDeleted(opts *bind.WatchOpts, sink chan<- *PDPVerifierProofSetDeleted, setId []*big.Int) (event.Subscription, error) { +// Solidity: event PiecesAdded(uint256 indexed setId, uint256[] pieceIds, (bytes)[] pieceCids) +func (_PDPVerifier *PDPVerifierFilterer) WatchPiecesAdded(opts *bind.WatchOpts, sink chan<- *PDPVerifierPiecesAdded, setId []*big.Int) (event.Subscription, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "ProofSetDeleted", setIdRule) + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "PiecesAdded", setIdRule) if err != nil { return nil, err } @@ -2604,8 +2734,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetDeleted(opts *bind.WatchOp select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierProofSetDeleted) - if err := _PDPVerifier.contract.UnpackLog(event, "ProofSetDeleted", log); err != nil { + event := new(PDPVerifierPiecesAdded) + if err := _PDPVerifier.contract.UnpackLog(event, "PiecesAdded", log); err != nil { return err } event.Raw = log @@ -2626,21 +2756,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetDeleted(opts *bind.WatchOp }), nil } -// ParseProofSetDeleted is a log parse operation binding the contract event 0x589e9a441b5bddda77c4ab647b0108764a9cc1a7f655aa9b7bc50b8bdfab8673. +// ParsePiecesAdded is a log parse operation binding the contract event 0x396df50222a87662e94bb7d173792d5e61fe0b193b6ccf791f7ce433f0b28207. // -// Solidity: event ProofSetDeleted(uint256 indexed setId, uint256 deletedLeafCount) -func (_PDPVerifier *PDPVerifierFilterer) ParseProofSetDeleted(log types.Log) (*PDPVerifierProofSetDeleted, error) { - event := new(PDPVerifierProofSetDeleted) - if err := _PDPVerifier.contract.UnpackLog(event, "ProofSetDeleted", log); err != nil { +// Solidity: event PiecesAdded(uint256 indexed setId, uint256[] pieceIds, (bytes)[] pieceCids) +func (_PDPVerifier *PDPVerifierFilterer) ParsePiecesAdded(log types.Log) (*PDPVerifierPiecesAdded, error) { + event := new(PDPVerifierPiecesAdded) + if err := _PDPVerifier.contract.UnpackLog(event, "PiecesAdded", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierProofSetEmptyIterator is returned from FilterProofSetEmpty and is used to iterate over the raw logs and unpacked data for ProofSetEmpty events raised by the PDPVerifier contract. -type PDPVerifierProofSetEmptyIterator struct { - Event *PDPVerifierProofSetEmpty // Event containing the contract specifics and raw log +// PDPVerifierPiecesRemovedIterator is returned from FilterPiecesRemoved and is used to iterate over the raw logs and unpacked data for PiecesRemoved events raised by the PDPVerifier contract. +type PDPVerifierPiecesRemovedIterator struct { + Event *PDPVerifierPiecesRemoved // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -2654,7 +2784,7 @@ type PDPVerifierProofSetEmptyIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierProofSetEmptyIterator) Next() bool { +func (it *PDPVerifierPiecesRemovedIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -2663,7 +2793,7 @@ func (it *PDPVerifierProofSetEmptyIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierProofSetEmpty) + it.Event = new(PDPVerifierPiecesRemoved) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2678,7 +2808,7 @@ func (it *PDPVerifierProofSetEmptyIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierProofSetEmpty) + it.Event = new(PDPVerifierPiecesRemoved) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2694,51 +2824,52 @@ func (it *PDPVerifierProofSetEmptyIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierProofSetEmptyIterator) Error() error { +func (it *PDPVerifierPiecesRemovedIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierProofSetEmptyIterator) Close() error { +func (it *PDPVerifierPiecesRemovedIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierProofSetEmpty represents a ProofSetEmpty event raised by the PDPVerifier contract. -type PDPVerifierProofSetEmpty struct { - SetId *big.Int - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierPiecesRemoved represents a PiecesRemoved event raised by the PDPVerifier contract. +type PDPVerifierPiecesRemoved struct { + SetId *big.Int + PieceIds []*big.Int + Raw types.Log // Blockchain specific contextual infos } -// FilterProofSetEmpty is a free log retrieval operation binding the contract event 0x323c29bc8d678a5d987b90a321982d10b9a91bcad071a9e445879497bf0e68e7. +// FilterPiecesRemoved is a free log retrieval operation binding the contract event 0x6e87df804629ac17804b57ba7abbdfac8bdc36bab504fb8a8801eb313a8ce7b1. // -// Solidity: event ProofSetEmpty(uint256 indexed setId) -func (_PDPVerifier *PDPVerifierFilterer) FilterProofSetEmpty(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierProofSetEmptyIterator, error) { +// Solidity: event PiecesRemoved(uint256 indexed setId, uint256[] pieceIds) +func (_PDPVerifier *PDPVerifierFilterer) FilterPiecesRemoved(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierPiecesRemovedIterator, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "ProofSetEmpty", setIdRule) + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "PiecesRemoved", setIdRule) if err != nil { return nil, err } - return &PDPVerifierProofSetEmptyIterator{contract: _PDPVerifier.contract, event: "ProofSetEmpty", logs: logs, sub: sub}, nil + return &PDPVerifierPiecesRemovedIterator{contract: _PDPVerifier.contract, event: "PiecesRemoved", logs: logs, sub: sub}, nil } -// WatchProofSetEmpty is a free log subscription operation binding the contract event 0x323c29bc8d678a5d987b90a321982d10b9a91bcad071a9e445879497bf0e68e7. +// WatchPiecesRemoved is a free log subscription operation binding the contract event 0x6e87df804629ac17804b57ba7abbdfac8bdc36bab504fb8a8801eb313a8ce7b1. // -// Solidity: event ProofSetEmpty(uint256 indexed setId) -func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetEmpty(opts *bind.WatchOpts, sink chan<- *PDPVerifierProofSetEmpty, setId []*big.Int) (event.Subscription, error) { +// Solidity: event PiecesRemoved(uint256 indexed setId, uint256[] pieceIds) +func (_PDPVerifier *PDPVerifierFilterer) WatchPiecesRemoved(opts *bind.WatchOpts, sink chan<- *PDPVerifierPiecesRemoved, setId []*big.Int) (event.Subscription, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "ProofSetEmpty", setIdRule) + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "PiecesRemoved", setIdRule) if err != nil { return nil, err } @@ -2748,8 +2879,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetEmpty(opts *bind.WatchOpts select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierProofSetEmpty) - if err := _PDPVerifier.contract.UnpackLog(event, "ProofSetEmpty", log); err != nil { + event := new(PDPVerifierPiecesRemoved) + if err := _PDPVerifier.contract.UnpackLog(event, "PiecesRemoved", log); err != nil { return err } event.Raw = log @@ -2770,21 +2901,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetEmpty(opts *bind.WatchOpts }), nil } -// ParseProofSetEmpty is a log parse operation binding the contract event 0x323c29bc8d678a5d987b90a321982d10b9a91bcad071a9e445879497bf0e68e7. +// ParsePiecesRemoved is a log parse operation binding the contract event 0x6e87df804629ac17804b57ba7abbdfac8bdc36bab504fb8a8801eb313a8ce7b1. // -// Solidity: event ProofSetEmpty(uint256 indexed setId) -func (_PDPVerifier *PDPVerifierFilterer) ParseProofSetEmpty(log types.Log) (*PDPVerifierProofSetEmpty, error) { - event := new(PDPVerifierProofSetEmpty) - if err := _PDPVerifier.contract.UnpackLog(event, "ProofSetEmpty", log); err != nil { +// Solidity: event PiecesRemoved(uint256 indexed setId, uint256[] pieceIds) +func (_PDPVerifier *PDPVerifierFilterer) ParsePiecesRemoved(log types.Log) (*PDPVerifierPiecesRemoved, error) { + event := new(PDPVerifierPiecesRemoved) + if err := _PDPVerifier.contract.UnpackLog(event, "PiecesRemoved", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierProofSetOwnerChangedIterator is returned from FilterProofSetOwnerChanged and is used to iterate over the raw logs and unpacked data for ProofSetOwnerChanged events raised by the PDPVerifier contract. -type PDPVerifierProofSetOwnerChangedIterator struct { - Event *PDPVerifierProofSetOwnerChanged // Event containing the contract specifics and raw log +// PDPVerifierPossessionProvenIterator is returned from FilterPossessionProven and is used to iterate over the raw logs and unpacked data for PossessionProven events raised by the PDPVerifier contract. +type PDPVerifierPossessionProvenIterator struct { + Event *PDPVerifierPossessionProven // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -2798,7 +2929,7 @@ type PDPVerifierProofSetOwnerChangedIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierProofSetOwnerChangedIterator) Next() bool { +func (it *PDPVerifierPossessionProvenIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -2807,7 +2938,7 @@ func (it *PDPVerifierProofSetOwnerChangedIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierProofSetOwnerChanged) + it.Event = new(PDPVerifierPossessionProven) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2822,7 +2953,7 @@ func (it *PDPVerifierProofSetOwnerChangedIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierProofSetOwnerChanged) + it.Event = new(PDPVerifierPossessionProven) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2838,69 +2969,52 @@ func (it *PDPVerifierProofSetOwnerChangedIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierProofSetOwnerChangedIterator) Error() error { +func (it *PDPVerifierPossessionProvenIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierProofSetOwnerChangedIterator) Close() error { +func (it *PDPVerifierPossessionProvenIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierProofSetOwnerChanged represents a ProofSetOwnerChanged event raised by the PDPVerifier contract. -type PDPVerifierProofSetOwnerChanged struct { - SetId *big.Int - OldOwner common.Address - NewOwner common.Address - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierPossessionProven represents a PossessionProven event raised by the PDPVerifier contract. +type PDPVerifierPossessionProven struct { + SetId *big.Int + Challenges []IPDPTypesPieceIdAndOffset + Raw types.Log // Blockchain specific contextual infos } -// FilterProofSetOwnerChanged is a free log retrieval operation binding the contract event 0xd3273037b635678293ef0c076bd77af13760e75e12806d1db237616d03c3a766. +// FilterPossessionProven is a free log retrieval operation binding the contract event 0x1acf7df9f0c1b0208c23be6178950c0273f89b766805a2c0bd1e53d25c700e50. // -// Solidity: event ProofSetOwnerChanged(uint256 indexed setId, address indexed oldOwner, address indexed newOwner) -func (_PDPVerifier *PDPVerifierFilterer) FilterProofSetOwnerChanged(opts *bind.FilterOpts, setId []*big.Int, oldOwner []common.Address, newOwner []common.Address) (*PDPVerifierProofSetOwnerChangedIterator, error) { +// Solidity: event PossessionProven(uint256 indexed setId, (uint256,uint256)[] challenges) +func (_PDPVerifier *PDPVerifierFilterer) FilterPossessionProven(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierPossessionProvenIterator, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - var oldOwnerRule []interface{} - for _, oldOwnerItem := range oldOwner { - oldOwnerRule = append(oldOwnerRule, oldOwnerItem) - } - var newOwnerRule []interface{} - for _, newOwnerItem := range newOwner { - newOwnerRule = append(newOwnerRule, newOwnerItem) - } - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "ProofSetOwnerChanged", setIdRule, oldOwnerRule, newOwnerRule) + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "PossessionProven", setIdRule) if err != nil { return nil, err } - return &PDPVerifierProofSetOwnerChangedIterator{contract: _PDPVerifier.contract, event: "ProofSetOwnerChanged", logs: logs, sub: sub}, nil + return &PDPVerifierPossessionProvenIterator{contract: _PDPVerifier.contract, event: "PossessionProven", logs: logs, sub: sub}, nil } -// WatchProofSetOwnerChanged is a free log subscription operation binding the contract event 0xd3273037b635678293ef0c076bd77af13760e75e12806d1db237616d03c3a766. +// WatchPossessionProven is a free log subscription operation binding the contract event 0x1acf7df9f0c1b0208c23be6178950c0273f89b766805a2c0bd1e53d25c700e50. // -// Solidity: event ProofSetOwnerChanged(uint256 indexed setId, address indexed oldOwner, address indexed newOwner) -func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetOwnerChanged(opts *bind.WatchOpts, sink chan<- *PDPVerifierProofSetOwnerChanged, setId []*big.Int, oldOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { +// Solidity: event PossessionProven(uint256 indexed setId, (uint256,uint256)[] challenges) +func (_PDPVerifier *PDPVerifierFilterer) WatchPossessionProven(opts *bind.WatchOpts, sink chan<- *PDPVerifierPossessionProven, setId []*big.Int) (event.Subscription, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - var oldOwnerRule []interface{} - for _, oldOwnerItem := range oldOwner { - oldOwnerRule = append(oldOwnerRule, oldOwnerItem) - } - var newOwnerRule []interface{} - for _, newOwnerItem := range newOwner { - newOwnerRule = append(newOwnerRule, newOwnerItem) - } - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "ProofSetOwnerChanged", setIdRule, oldOwnerRule, newOwnerRule) + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "PossessionProven", setIdRule) if err != nil { return nil, err } @@ -2910,8 +3024,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetOwnerChanged(opts *bind.Wa select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierProofSetOwnerChanged) - if err := _PDPVerifier.contract.UnpackLog(event, "ProofSetOwnerChanged", log); err != nil { + event := new(PDPVerifierPossessionProven) + if err := _PDPVerifier.contract.UnpackLog(event, "PossessionProven", log); err != nil { return err } event.Raw = log @@ -2932,21 +3046,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchProofSetOwnerChanged(opts *bind.Wa }), nil } -// ParseProofSetOwnerChanged is a log parse operation binding the contract event 0xd3273037b635678293ef0c076bd77af13760e75e12806d1db237616d03c3a766. +// ParsePossessionProven is a log parse operation binding the contract event 0x1acf7df9f0c1b0208c23be6178950c0273f89b766805a2c0bd1e53d25c700e50. // -// Solidity: event ProofSetOwnerChanged(uint256 indexed setId, address indexed oldOwner, address indexed newOwner) -func (_PDPVerifier *PDPVerifierFilterer) ParseProofSetOwnerChanged(log types.Log) (*PDPVerifierProofSetOwnerChanged, error) { - event := new(PDPVerifierProofSetOwnerChanged) - if err := _PDPVerifier.contract.UnpackLog(event, "ProofSetOwnerChanged", log); err != nil { +// Solidity: event PossessionProven(uint256 indexed setId, (uint256,uint256)[] challenges) +func (_PDPVerifier *PDPVerifierFilterer) ParsePossessionProven(log types.Log) (*PDPVerifierPossessionProven, error) { + event := new(PDPVerifierPossessionProven) + if err := _PDPVerifier.contract.UnpackLog(event, "PossessionProven", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierRootsAddedIterator is returned from FilterRootsAdded and is used to iterate over the raw logs and unpacked data for RootsAdded events raised by the PDPVerifier contract. -type PDPVerifierRootsAddedIterator struct { - Event *PDPVerifierRootsAdded // Event containing the contract specifics and raw log +// PDPVerifierProofFeePaidIterator is returned from FilterProofFeePaid and is used to iterate over the raw logs and unpacked data for ProofFeePaid events raised by the PDPVerifier contract. +type PDPVerifierProofFeePaidIterator struct { + Event *PDPVerifierProofFeePaid // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -2960,7 +3074,7 @@ type PDPVerifierRootsAddedIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierRootsAddedIterator) Next() bool { +func (it *PDPVerifierProofFeePaidIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -2969,7 +3083,7 @@ func (it *PDPVerifierRootsAddedIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierRootsAdded) + it.Event = new(PDPVerifierProofFeePaid) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -2984,7 +3098,7 @@ func (it *PDPVerifierRootsAddedIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierRootsAdded) + it.Event = new(PDPVerifierProofFeePaid) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -3000,52 +3114,54 @@ func (it *PDPVerifierRootsAddedIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierRootsAddedIterator) Error() error { +func (it *PDPVerifierProofFeePaidIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierRootsAddedIterator) Close() error { +func (it *PDPVerifierProofFeePaidIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierRootsAdded represents a RootsAdded event raised by the PDPVerifier contract. -type PDPVerifierRootsAdded struct { - SetId *big.Int - RootIds []*big.Int - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierProofFeePaid represents a ProofFeePaid event raised by the PDPVerifier contract. +type PDPVerifierProofFeePaid struct { + SetId *big.Int + Fee *big.Int + Price uint64 + Expo int32 + Raw types.Log // Blockchain specific contextual infos } -// FilterRootsAdded is a free log retrieval operation binding the contract event 0x5ce51a8003915c377679ba533d9dafa0792058b254965697e674272f13f4fdd3. +// FilterProofFeePaid is a free log retrieval operation binding the contract event 0x928bbf5188022bf8b9a0e59f5e81e179d0a4c729bdba2856ac971af2063fbf2b. // -// Solidity: event RootsAdded(uint256 indexed setId, uint256[] rootIds) -func (_PDPVerifier *PDPVerifierFilterer) FilterRootsAdded(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierRootsAddedIterator, error) { +// Solidity: event ProofFeePaid(uint256 indexed setId, uint256 fee, uint64 price, int32 expo) +func (_PDPVerifier *PDPVerifierFilterer) FilterProofFeePaid(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierProofFeePaidIterator, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "RootsAdded", setIdRule) + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "ProofFeePaid", setIdRule) if err != nil { return nil, err } - return &PDPVerifierRootsAddedIterator{contract: _PDPVerifier.contract, event: "RootsAdded", logs: logs, sub: sub}, nil + return &PDPVerifierProofFeePaidIterator{contract: _PDPVerifier.contract, event: "ProofFeePaid", logs: logs, sub: sub}, nil } -// WatchRootsAdded is a free log subscription operation binding the contract event 0x5ce51a8003915c377679ba533d9dafa0792058b254965697e674272f13f4fdd3. +// WatchProofFeePaid is a free log subscription operation binding the contract event 0x928bbf5188022bf8b9a0e59f5e81e179d0a4c729bdba2856ac971af2063fbf2b. // -// Solidity: event RootsAdded(uint256 indexed setId, uint256[] rootIds) -func (_PDPVerifier *PDPVerifierFilterer) WatchRootsAdded(opts *bind.WatchOpts, sink chan<- *PDPVerifierRootsAdded, setId []*big.Int) (event.Subscription, error) { +// Solidity: event ProofFeePaid(uint256 indexed setId, uint256 fee, uint64 price, int32 expo) +func (_PDPVerifier *PDPVerifierFilterer) WatchProofFeePaid(opts *bind.WatchOpts, sink chan<- *PDPVerifierProofFeePaid, setId []*big.Int) (event.Subscription, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "RootsAdded", setIdRule) + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "ProofFeePaid", setIdRule) if err != nil { return nil, err } @@ -3055,8 +3171,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchRootsAdded(opts *bind.WatchOpts, s select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierRootsAdded) - if err := _PDPVerifier.contract.UnpackLog(event, "RootsAdded", log); err != nil { + event := new(PDPVerifierProofFeePaid) + if err := _PDPVerifier.contract.UnpackLog(event, "ProofFeePaid", log); err != nil { return err } event.Raw = log @@ -3077,21 +3193,21 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchRootsAdded(opts *bind.WatchOpts, s }), nil } -// ParseRootsAdded is a log parse operation binding the contract event 0x5ce51a8003915c377679ba533d9dafa0792058b254965697e674272f13f4fdd3. +// ParseProofFeePaid is a log parse operation binding the contract event 0x928bbf5188022bf8b9a0e59f5e81e179d0a4c729bdba2856ac971af2063fbf2b. // -// Solidity: event RootsAdded(uint256 indexed setId, uint256[] rootIds) -func (_PDPVerifier *PDPVerifierFilterer) ParseRootsAdded(log types.Log) (*PDPVerifierRootsAdded, error) { - event := new(PDPVerifierRootsAdded) - if err := _PDPVerifier.contract.UnpackLog(event, "RootsAdded", log); err != nil { +// Solidity: event ProofFeePaid(uint256 indexed setId, uint256 fee, uint64 price, int32 expo) +func (_PDPVerifier *PDPVerifierFilterer) ParseProofFeePaid(log types.Log) (*PDPVerifierProofFeePaid, error) { + event := new(PDPVerifierProofFeePaid) + if err := _PDPVerifier.contract.UnpackLog(event, "ProofFeePaid", log); err != nil { return nil, err } event.Raw = log return event, nil } -// PDPVerifierRootsRemovedIterator is returned from FilterRootsRemoved and is used to iterate over the raw logs and unpacked data for RootsRemoved events raised by the PDPVerifier contract. -type PDPVerifierRootsRemovedIterator struct { - Event *PDPVerifierRootsRemoved // Event containing the contract specifics and raw log +// PDPVerifierStorageProviderChangedIterator is returned from FilterStorageProviderChanged and is used to iterate over the raw logs and unpacked data for StorageProviderChanged events raised by the PDPVerifier contract. +type PDPVerifierStorageProviderChangedIterator struct { + Event *PDPVerifierStorageProviderChanged // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data @@ -3105,7 +3221,7 @@ type PDPVerifierRootsRemovedIterator struct { // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. -func (it *PDPVerifierRootsRemovedIterator) Next() bool { +func (it *PDPVerifierStorageProviderChangedIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false @@ -3114,7 +3230,7 @@ func (it *PDPVerifierRootsRemovedIterator) Next() bool { if it.done { select { case log := <-it.logs: - it.Event = new(PDPVerifierRootsRemoved) + it.Event = new(PDPVerifierStorageProviderChanged) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -3129,7 +3245,7 @@ func (it *PDPVerifierRootsRemovedIterator) Next() bool { // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: - it.Event = new(PDPVerifierRootsRemoved) + it.Event = new(PDPVerifierStorageProviderChanged) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false @@ -3145,52 +3261,69 @@ func (it *PDPVerifierRootsRemovedIterator) Next() bool { } // Error returns any retrieval or parsing error occurred during filtering. -func (it *PDPVerifierRootsRemovedIterator) Error() error { +func (it *PDPVerifierStorageProviderChangedIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. -func (it *PDPVerifierRootsRemovedIterator) Close() error { +func (it *PDPVerifierStorageProviderChangedIterator) Close() error { it.sub.Unsubscribe() return nil } -// PDPVerifierRootsRemoved represents a RootsRemoved event raised by the PDPVerifier contract. -type PDPVerifierRootsRemoved struct { - SetId *big.Int - RootIds []*big.Int - Raw types.Log // Blockchain specific contextual infos +// PDPVerifierStorageProviderChanged represents a StorageProviderChanged event raised by the PDPVerifier contract. +type PDPVerifierStorageProviderChanged struct { + SetId *big.Int + OldStorageProvider common.Address + NewStorageProvider common.Address + Raw types.Log // Blockchain specific contextual infos } -// FilterRootsRemoved is a free log retrieval operation binding the contract event 0xd22bb0ee05b8ca92312459c76223d3b9bc1bd96fb6c9b18e637ededf92d81174. +// FilterStorageProviderChanged is a free log retrieval operation binding the contract event 0x686146a80f2bf4dc855942926481871515b39b508826d7982a2e0212d20552c9. // -// Solidity: event RootsRemoved(uint256 indexed setId, uint256[] rootIds) -func (_PDPVerifier *PDPVerifierFilterer) FilterRootsRemoved(opts *bind.FilterOpts, setId []*big.Int) (*PDPVerifierRootsRemovedIterator, error) { +// Solidity: event StorageProviderChanged(uint256 indexed setId, address indexed oldStorageProvider, address indexed newStorageProvider) +func (_PDPVerifier *PDPVerifierFilterer) FilterStorageProviderChanged(opts *bind.FilterOpts, setId []*big.Int, oldStorageProvider []common.Address, newStorageProvider []common.Address) (*PDPVerifierStorageProviderChangedIterator, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } + var oldStorageProviderRule []interface{} + for _, oldStorageProviderItem := range oldStorageProvider { + oldStorageProviderRule = append(oldStorageProviderRule, oldStorageProviderItem) + } + var newStorageProviderRule []interface{} + for _, newStorageProviderItem := range newStorageProvider { + newStorageProviderRule = append(newStorageProviderRule, newStorageProviderItem) + } - logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "RootsRemoved", setIdRule) + logs, sub, err := _PDPVerifier.contract.FilterLogs(opts, "StorageProviderChanged", setIdRule, oldStorageProviderRule, newStorageProviderRule) if err != nil { return nil, err } - return &PDPVerifierRootsRemovedIterator{contract: _PDPVerifier.contract, event: "RootsRemoved", logs: logs, sub: sub}, nil + return &PDPVerifierStorageProviderChangedIterator{contract: _PDPVerifier.contract, event: "StorageProviderChanged", logs: logs, sub: sub}, nil } -// WatchRootsRemoved is a free log subscription operation binding the contract event 0xd22bb0ee05b8ca92312459c76223d3b9bc1bd96fb6c9b18e637ededf92d81174. +// WatchStorageProviderChanged is a free log subscription operation binding the contract event 0x686146a80f2bf4dc855942926481871515b39b508826d7982a2e0212d20552c9. // -// Solidity: event RootsRemoved(uint256 indexed setId, uint256[] rootIds) -func (_PDPVerifier *PDPVerifierFilterer) WatchRootsRemoved(opts *bind.WatchOpts, sink chan<- *PDPVerifierRootsRemoved, setId []*big.Int) (event.Subscription, error) { +// Solidity: event StorageProviderChanged(uint256 indexed setId, address indexed oldStorageProvider, address indexed newStorageProvider) +func (_PDPVerifier *PDPVerifierFilterer) WatchStorageProviderChanged(opts *bind.WatchOpts, sink chan<- *PDPVerifierStorageProviderChanged, setId []*big.Int, oldStorageProvider []common.Address, newStorageProvider []common.Address) (event.Subscription, error) { var setIdRule []interface{} for _, setIdItem := range setId { setIdRule = append(setIdRule, setIdItem) } + var oldStorageProviderRule []interface{} + for _, oldStorageProviderItem := range oldStorageProvider { + oldStorageProviderRule = append(oldStorageProviderRule, oldStorageProviderItem) + } + var newStorageProviderRule []interface{} + for _, newStorageProviderItem := range newStorageProvider { + newStorageProviderRule = append(newStorageProviderRule, newStorageProviderItem) + } - logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "RootsRemoved", setIdRule) + logs, sub, err := _PDPVerifier.contract.WatchLogs(opts, "StorageProviderChanged", setIdRule, oldStorageProviderRule, newStorageProviderRule) if err != nil { return nil, err } @@ -3200,8 +3333,8 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchRootsRemoved(opts *bind.WatchOpts, select { case log := <-logs: // New log arrived, parse the event and forward to the user - event := new(PDPVerifierRootsRemoved) - if err := _PDPVerifier.contract.UnpackLog(event, "RootsRemoved", log); err != nil { + event := new(PDPVerifierStorageProviderChanged) + if err := _PDPVerifier.contract.UnpackLog(event, "StorageProviderChanged", log); err != nil { return err } event.Raw = log @@ -3222,12 +3355,12 @@ func (_PDPVerifier *PDPVerifierFilterer) WatchRootsRemoved(opts *bind.WatchOpts, }), nil } -// ParseRootsRemoved is a log parse operation binding the contract event 0xd22bb0ee05b8ca92312459c76223d3b9bc1bd96fb6c9b18e637ededf92d81174. +// ParseStorageProviderChanged is a log parse operation binding the contract event 0x686146a80f2bf4dc855942926481871515b39b508826d7982a2e0212d20552c9. // -// Solidity: event RootsRemoved(uint256 indexed setId, uint256[] rootIds) -func (_PDPVerifier *PDPVerifierFilterer) ParseRootsRemoved(log types.Log) (*PDPVerifierRootsRemoved, error) { - event := new(PDPVerifierRootsRemoved) - if err := _PDPVerifier.contract.UnpackLog(event, "RootsRemoved", log); err != nil { +// Solidity: event StorageProviderChanged(uint256 indexed setId, address indexed oldStorageProvider, address indexed newStorageProvider) +func (_PDPVerifier *PDPVerifierFilterer) ParseStorageProviderChanged(log types.Log) (*PDPVerifierStorageProviderChanged, error) { + event := new(PDPVerifierStorageProviderChanged) + if err := _PDPVerifier.contract.UnpackLog(event, "StorageProviderChanged", log); err != nil { return nil, err } event.Raw = log diff --git a/pdp/contract/types.go b/pdp/contract/types.go new file mode 100644 index 000000000..cf7c9bf89 --- /dev/null +++ b/pdp/contract/types.go @@ -0,0 +1,6 @@ +package contract + +// PieceData matches the Solidity PieceData struct +type PieceData struct { + Piece struct{ Data []byte } +} diff --git a/pdp/contract/utils.go b/pdp/contract/utils.go new file mode 100644 index 000000000..72167f2bf --- /dev/null +++ b/pdp/contract/utils.go @@ -0,0 +1,37 @@ +package contract + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "golang.org/x/xerrors" +) + +// GetProvingScheduleFromListener checks if a listener has a view contract and returns +// an IPDPProvingSchedule instance bound to the appropriate address. +// It uses the view contract address if available, otherwise uses the listener address directly. +func GetProvingScheduleFromListener(listenerAddr common.Address, ethClient *ethclient.Client) (*IPDPProvingSchedule, error) { + // Try to get the view contract address from the listener + provingScheduleAddr := listenerAddr + + // Check if the listener supports the viewContractAddress method + listenerService, err := NewListenerServiceWithViewContract(listenerAddr, ethClient) + if err == nil { + // Try to get the view contract address + viewAddr, err := listenerService.ViewContractAddress(nil) + if err == nil && viewAddr != (common.Address{}) { + // Use the view contract for proving schedule operations + provingScheduleAddr = viewAddr + } + } + + // Create and return the IPDPProvingSchedule binding + // This works whether provingScheduleAddr points to: + // - The view contract (which must implement IPDPProvingSchedule) + // - The listener itself (where listener must implement IPDPProvingSchedule) + provingSchedule, err := NewIPDPProvingSchedule(provingScheduleAddr, ethClient) + if err != nil { + return nil, xerrors.Errorf("failed to create proving schedule binding: %w", err) + } + + return provingSchedule, nil +} diff --git a/pdp/handlers.go b/pdp/handlers.go index 0bf57c503..1ae8229c6 100644 --- a/pdp/handlers.go +++ b/pdp/handlers.go @@ -33,8 +33,7 @@ import ( types2 "github.com/filecoin-project/lotus/chain/types" ) -// PDPRoutePath is the base path for PDP routes -const PDPRoutePath = "/pdp" +const requestTimeout = 10 * time.Second // PDPService represents the service for managing proof sets and pieces type PDPService struct { @@ -65,27 +64,30 @@ func NewPDPService(db *harmonydb.DB, stor paths.StashStore, ec *ethclient.Client } // Routes registers the HTTP routes with the provided router -func Routes(r *chi.Mux, p *PDPService) { +func Routes(p *PDPService) http.Handler { + + r := chi.NewRouter() + // Routes for proof sets - r.Route(path.Join(PDPRoutePath, "/proof-sets"), func(r chi.Router) { + r.Route("/proof-sets", func(r chi.Router) { // POST /pdp/proof-sets - Create a new proof set - r.Post("/", p.handleCreateProofSet) + r.Method("POST", "/", http.TimeoutHandler(http.HandlerFunc(p.handleCreateProofSet), requestTimeout, "request timeout")) // GET /pdp/proof-sets/created/{txHash} - Get the status of a proof set creation - r.Get("/created/{txHash}", p.handleGetProofSetCreationStatus) + r.Method("GET", "/created/{txHash}", http.TimeoutHandler(http.HandlerFunc(p.handleGetProofSetCreationStatus), requestTimeout, "request timeout")) // Individual proof set routes r.Route("/{proofSetID}", func(r chi.Router) { // GET /pdp/proof-sets/{set-id} - r.Get("/", p.handleGetProofSet) + r.Method("GET", "/", http.TimeoutHandler(http.HandlerFunc(p.handleGetProofSet), requestTimeout, "request timeout")) // DEL /pdp/proof-sets/{set-id} - r.Delete("/", p.handleDeleteProofSet) + r.Method("DELETE", "/", http.TimeoutHandler(http.HandlerFunc(p.handleDeleteProofSet), requestTimeout, "request timeout")) // Routes for roots within a proof set r.Route("/roots", func(r chi.Router) { // POST /pdp/proof-sets/{set-id}/roots - r.Post("/", p.handleAddRootToProofSet) + r.Method("POST", "/", http.TimeoutHandler(http.HandlerFunc(p.handleAddRootToProofSet), requestTimeout, "request timeout")) // GET /pdp/proof-sets/{set-id}/roots/added/{txHash} r.Get("/added/{txHash}", p.handleGetRootAdditionStatus) @@ -93,26 +95,27 @@ func Routes(r *chi.Mux, p *PDPService) { // Individual root routes r.Route("/{rootID}", func(r chi.Router) { // GET /pdp/proof-sets/{set-id}/roots/{root-id} - r.Get("/", p.handleGetProofSetRoot) + r.Method("GET", "/", http.TimeoutHandler(http.HandlerFunc(p.handleGetProofSetRoot), requestTimeout, "request timeout")) // DEL /pdp/proof-sets/{set-id}/roots/{root-id} - r.Delete("/", p.handleDeleteProofSetRoot) + r.Method("DELETE", "/", http.TimeoutHandler(http.HandlerFunc(p.handleDeleteProofSetRoot), requestTimeout, "request timeout")) }) }) }) }) - r.Get(path.Join(PDPRoutePath, "/ping"), p.handlePing) + r.Method("GET", "/ping", http.TimeoutHandler(http.HandlerFunc(p.handlePing), requestTimeout, "request timeout")) // Routes for piece storage and retrieval // POST /pdp/piece - r.Post(path.Join(PDPRoutePath, "/piece"), p.handlePiecePost) + r.Method("POST", "/piece", http.TimeoutHandler(http.HandlerFunc(p.handlePiecePost), requestTimeout, "request timeout")) // GET /pdp/piece/ - r.Get(path.Join(PDPRoutePath, "/piece/"), p.handleFindPiece) + r.Method("GET", "/piece", http.TimeoutHandler(http.HandlerFunc(p.handleFindPiece), requestTimeout, "request timeout")) // PUT /pdp/piece/upload/{uploadUUID} - r.Put(path.Join(PDPRoutePath, "/piece/upload/{uploadUUID}"), p.handlePieceUpload) + r.Put("/piece/upload/{uploadUUID}", p.handlePieceUpload) + return r } // Handler functions @@ -357,7 +360,7 @@ func (p *PDPService) handleGetProofSetCreationStatus(w http.ResponseWriter, r *h WHERE create_message_hash = $1 `, txHash).Scan(&proofSetCreate.CreateMessageHash, &proofSetCreate.OK, &proofSetCreate.ProofSetCreated, &proofSetCreate.Service) if err != nil { - if err == sql.ErrNoRows { + if errors.Is(err, pgx.ErrNoRows) { http.Error(w, "Proof set creation not found for given txHash", http.StatusNotFound) return } @@ -394,7 +397,7 @@ func (p *PDPService) handleGetProofSetCreationStatus(w http.ResponseWriter, r *h WHERE signed_tx_hash = $1 `, txHash).Scan(&txStatus) if err != nil { - if err == sql.ErrNoRows { + if errors.Is(err, pgx.ErrNoRows) { // This should not happen as per foreign key constraints http.Error(w, "Message status not found for given txHash", http.StatusInternalServerError) return diff --git a/pdp/handlers_upload.go b/pdp/handlers_upload.go index fec8ebdfc..69efc0840 100644 --- a/pdp/handlers_upload.go +++ b/pdp/handlers_upload.go @@ -6,6 +6,7 @@ import ( "database/sql" "encoding/hex" "encoding/json" + "errors" "fmt" "hash" "io" @@ -84,7 +85,7 @@ func (ph *PieceHash) commp(ctx context.Context, db *harmonydb.DB) (cid.Cid, bool SELECT commp FROM pdp_piece_mh_to_commp WHERE mhash = $1 AND size = $2 `, mh, ph.Size).Scan(&commpStr) if err != nil { - if err == pgx.ErrNoRows { + if errors.Is(err, pgx.ErrNoRows) { return cid.Undef, false, nil } return cid.Undef, false, fmt.Errorf("failed to query pdp_piece_mh_to_commp: %w", err) @@ -205,7 +206,7 @@ func (p *PDPService) handlePiecePost(w http.ResponseWriter, r *http.Request) { } // Create a location URL where the piece data can be uploaded via PUT - uploadURL = path.Join(PDPRoutePath, "/piece/upload", uploadUUID.String()) + uploadURL = path.Join(r.URL.Path, "upload", uploadUUID.String()) responseStatus = http.StatusCreated return true, nil // Commit the transaction @@ -260,7 +261,7 @@ func (p *PDPService) handlePieceUpload(w http.ResponseWriter, r *http.Request) { SELECT piece_cid, notify_url, piece_ref, check_hash_codec, check_hash, check_size FROM pdp_piece_uploads WHERE id = $1 `, uploadUUID.String()).Scan(&pieceCIDStr, ¬ifyURL, &pieceRef, &checkHashName, &checkHash, &checkSize) if err != nil { - if err == sql.ErrNoRows { + if errors.Is(err, pgx.ErrNoRows) { http.Error(w, "Upload UUID not found", http.StatusNotFound) } else { http.Error(w, "Database error", http.StatusInternalServerError) diff --git a/tasks/gc/pipeline_meta_gc.go b/tasks/gc/pipeline_meta_gc.go index 9dc7269d4..094da2953 100644 --- a/tasks/gc/pipeline_meta_gc.go +++ b/tasks/gc/pipeline_meta_gc.go @@ -38,6 +38,12 @@ func (s *PipelineGC) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done return false, xerrors.Errorf("cleanupUnseal: %w", err) } + if err := s.cleanupMK20DealPipeline(); err != nil { + return false, xerrors.Errorf("cleanupMK20DealPipeline: %w", err) + } + if err := s.cleanupPDPPipeline(); err != nil { + return false, xerrors.Errorf("cleanupPDPPipeline: %w", err) + } return true, nil } @@ -158,12 +164,8 @@ func (s *PipelineGC) cleanupUpgrade() error { } func (s *PipelineGC) cleanupMK12DealPipeline() error { - // Remove market_mk12_deal_pipeline entries where: - // sealed is true and indexed is true ctx := context.Background() - // Execute the query - // NOTE: pipelines can be complete before indexing finishes in case of reindexing pipeline tasks (created in CheckIndex task) _, err := s.db.Exec(ctx, `DELETE FROM market_mk12_deal_pipeline WHERE (should_index = FALSE OR indexed = TRUE) AND complete = TRUE;`) if err != nil { return xerrors.Errorf("failed to clean up sealed deals: %w", err) @@ -177,6 +179,34 @@ func (s *PipelineGC) cleanupMK12DealPipeline() error { return nil } +func (s *PipelineGC) cleanupMK20DealPipeline() error { + ctx := context.Background() + + _, err := s.db.Exec(ctx, `DELETE FROM market_mk20_offline_urls + WHERE id IN ( + SELECT id FROM market_mk20_pipeline WHERE complete = TRUE)`) + if err != nil { + return xerrors.Errorf("failed to clean up offline urls: %w", err) + } + _, err = s.db.Exec(ctx, `DELETE FROM market_mk20_download_pipeline + WHERE id IN ( + SELECT id FROM market_mk20_pipeline WHERE complete = TRUE)`) + if err != nil { + return xerrors.Errorf("failed to clean up download pipeline: %w", err) + } + _, err = s.db.Exec(ctx, `DELETE FROM market_mk20_pipeline WHERE (indexing = FALSE OR indexed = TRUE) AND complete = TRUE;`) + if err != nil { + return xerrors.Errorf("failed to clean up sealed deals: %w", err) + } + + _, err = s.db.Exec(ctx, `DELETE FROM pdp_ipni_task WHERE complete = TRUE;`) + if err != nil { + return xerrors.Errorf("failed to clean up PDP indexing tasks: %w", err) + } + + return nil +} + func (s *PipelineGC) cleanupUnseal() error { // Remove sectors_unseal_pipeline entries where: // after_unseal_sdr is true @@ -196,5 +226,14 @@ func (s *PipelineGC) cleanupUnseal() error { return nil } +func (s *PipelineGC) cleanupPDPPipeline() error { + ctx := context.Background() + _, err := s.db.Exec(ctx, `DELETE FROM pdp_pipeline WHERE complete = TRUE;`) + if err != nil { + return xerrors.Errorf("failed to clean up sealed deals: %w", err) + } + return nil +} + var _ harmonytask.TaskInterface = &PipelineGC{} var _ = harmonytask.Reg(&PipelineGC{}) diff --git a/tasks/gc/storage_gc_mark.go b/tasks/gc/storage_gc_mark.go index de4ee17c2..f1fdd14a2 100644 --- a/tasks/gc/storage_gc_mark.go +++ b/tasks/gc/storage_gc_mark.go @@ -324,6 +324,9 @@ func (s *StorageGCMark) Do(taskID harmonytask.TaskID, stillOwned func() bool) (d lb := policy.GetWinningPoStSectorSetLookback(nv) + builtin.EpochsInDay + 1 finalityHeight := head.Height() - lb + if finalityHeight < 0 { + finalityHeight = 1 + } finalityTipset, err := s.api.ChainGetTipSetByHeight(ctx, finalityHeight, head.Key()) if err != nil { diff --git a/tasks/gc/task_cleanup_piece.go b/tasks/gc/task_cleanup_piece.go new file mode 100644 index 000000000..7affddd29 --- /dev/null +++ b/tasks/gc/task_cleanup_piece.go @@ -0,0 +1,732 @@ +package gc + +import ( + "bytes" + "context" + "database/sql" + "errors" + "strings" + "time" + + "github.com/google/uuid" + "github.com/ipfs/go-cid" + "github.com/oklog/ulid" + "github.com/samber/lo" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/lib/promise" + "github.com/filecoin-project/curio/market/indexstore" + "github.com/filecoin-project/curio/market/ipni/types" + "github.com/filecoin-project/curio/market/mk20" + "github.com/filecoin-project/curio/tasks/indexing" +) + +type PieceCleanupTask struct { + db *harmonydb.DB + indexStore *indexstore.IndexStore + TF promise.Promise[harmonytask.AddTaskFunc] +} + +func NewPieceCleanupTask(db *harmonydb.DB, indexStore *indexstore.IndexStore) *PieceCleanupTask { + return &PieceCleanupTask{ + db: db, + indexStore: indexStore, + } +} + +func (p *PieceCleanupTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + // TODO: Optimize this Do() as it is currently cumbersome, repetitive and slow. Fix this in a new PR + // TODO: Plug this into PoRep 1.2 and 2.0 clean up as well + // TODO: Remove Deal from MK12 and Mk20? + + ctx := context.Background() + + // To avoid static naming + pdpIpni := indexing.NewPDPIPNITask(nil, nil, nil, nil, taskhelp.Max(0)) + pdpIpniName := pdpIpni.TypeDetails().Name + + poRepIpni := indexing.NewIPNITask(nil, nil, nil, nil, nil, taskhelp.Max(0)) + poRepIpniName := poRepIpni.TypeDetails().Name + + var tasks []struct { + ID string `db:"id"` + PieceCid string `db:"piece_cid_v2"` + PDP bool `db:"pdp"` + } + + err = p.db.Select(ctx, &tasks, `SELECT id, piece_cid_v2, pdp FROM piece_cleanup WHERE task_id = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to get piece cleanup task: %w", err) + } + + if len(tasks) != 1 { + return false, xerrors.Errorf("expected 1 piece cleanup task but got %d", len(tasks)) + } + + task := tasks[0] + + var isMK12 bool + var isMK20 bool + _, err = uuid.Parse(task.ID) + if err == nil { + isMK12 = true + } else { + _, err = ulid.Parse(task.ID) + if err == nil { + isMK20 = true + } + if err != nil { + return false, xerrors.Errorf("failed to parse task ID %s: %w", task.ID, err) + } + } + + pcid2, err := cid.Parse(task.PieceCid) + if err != nil { + return false, xerrors.Errorf("failed to parse piece cid: %w", err) + } + + pi, err := mk20.GetPieceInfo(pcid2) + if err != nil { + return false, xerrors.Errorf("failed to get piece info for piece %s: %w", pcid2, err) + } + + // Did we index this piece? + var indexed bool + err = p.db.QueryRow(ctx, `SELECT indexed FROM market_piece_metadata WHERE piece_cid = $1 AND piece_size = $2`, pi.PieceCIDV1.String(), pi.Size).Scan(&indexed) + if err != nil { + return false, xerrors.Errorf("failed to check if piece if indexe: %w", err) + } + + dropIndex := true + + type pd struct { + ID string `db:"id"` + SPID int64 `db:"sp_id"` + Sector int64 `db:"sector_num"` + PieceRef sql.NullInt64 `db:"piece_ref"` + } + + var toRM *pd + + var pieceDeals []pd + + // Let's piece deals as we need to make a complicated decision about IPNI and Indexing + err = p.db.Select(ctx, &pieceDeals, `SELECT id, + sp_id, + sector_num, + piece_ref + FROM market_piece_deal WHERE piece_cid = $1 AND piece_length = $2`, pi.PieceCIDV1.String(), pi.Size) + if err != nil { + return false, xerrors.Errorf("failed to get piece deals: %w", err) + } + + if len(pieceDeals) == 0 { + // This could be due to partial clean up + log.Infof("No piece deals found for piece %s", taskID) + return false, nil + } + /* + Get a list of piece deals + 1. If only single row then check + a) MK1.2 + i) publish IPNI removal ad + ii) Drop index + b) MK2.0 + i) Publish IPNI Ad based on attached product + ii) Drop index if any + iii) Drop Aggregate index + 2. If multiple rows then, check if + a.) MK1.2 + i) If any of the deals is MK1.2 and is not the deal we are cleaning then keep the indexes and don't publish IPNI rm Ad + ii) If there are any MK2.0 deal then check if they are PoRep or PDP + a.) If any of the deals is MK1.2 and is not the deal we are cleaning then keep the indexes + b.) If 2 rows, with same ID then we have PoRep and PDP for same deal. Clean up based on product. + c.) If we have multiple rows with different MK2.0 deals then we need to make a complex decision + i) Check if any of them apart from deal we are cleaning is paying to keep index. If yes, then don't remove them + ii) Check if any of them is paying to keep IPNI payload announced apart from deal we are cleaning. If yes, then don't publish RM ad + iii) Don't publish RM ad for IPNI piece if we have any other PDP deals + */ + + if len(pieceDeals) == 1 { + // Single piece deal, then drop index if deal ID matches + pieceDeal := pieceDeals[0] + if task.ID != pieceDeal.ID { + return false, xerrors.Errorf("piece deal ID %s does not match task ID %s", pieceDeal.ID, task.ID) + } + toRM = &pieceDeal + + UUID, err := uuid.Parse(task.ID) + if err == nil { + pinfo := abi.PieceInfo{ + PieceCID: pi.PieceCIDV1, + Size: pi.Size, + } + b := new(bytes.Buffer) + err = pinfo.MarshalCBOR(b) + if err != nil { + return false, xerrors.Errorf("marshaling piece info: %w", err) + } + + p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + var peer string + err = tx.QueryRow(`SELECT peer_id FROM ipni_peerid WHERE sp_id = $1`, pieceDeal.SPID).Scan(&peer) + if err != nil { + return false, xerrors.Errorf("failed to get peer id for provider: %w", err) + } + + if peer == "" { + return false, xerrors.Errorf("no peer id found for sp_id %d", pieceDeal.SPID) + } + + _, err = tx.Exec(`SELECT insert_ipni_task($1, $2, $3, $4, $5, $6, $7, $8, $9)`, UUID.String(), pieceDeal.SPID, pieceDeal.Sector, 0, 0, b.Bytes(), true, peer, id) + if err != nil { + if harmonydb.IsErrUniqueContraint(err) { + log.Infof("Another IPNI announce task already present for piece %s in deal %s", pcid2, UUID) + return false, nil + } + if strings.Contains(err.Error(), "already published") { + log.Infof("Piece %s in deal %s is already published", pcid2, UUID) + return false, nil + } + return false, xerrors.Errorf("updating IPNI announcing task id: %w", err) + } + // Fix the harmony_task.name + n, err := tx.Exec(`UPDATE harmony_task SET name = $1 WHERE id = $2`, poRepIpniName, id) + if err != nil { + return false, xerrors.Errorf("failed to update task name: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("failed to update task name: %d rows updated", n) + } + return true, nil + }) + } else { + lid, err := ulid.Parse(pieceDeal.ID) + if err == nil { + + deal, err := mk20.DealFromDB(ctx, p.db, lid) + if err != nil { + return false, xerrors.Errorf("failed to get deal for id %s: %w", lid, err) + } + + if deal.Products.RetrievalV1 == nil { + // Return early, we don't need to drop index or publish rm ads + return true, nil + } + + retv := deal.Products.RetrievalV1 + + if task.PDP { + // Let's publish PDP removal first + var peer string + err = p.db.QueryRow(ctx, `SELECT peer_id FROM ipni_peerid WHERE sp_id = $1`, -1).Scan(&peer) + if err != nil { + return false, xerrors.Errorf("failed to get peer id for PDP provider: %w", err) + } + + if peer == "" { + return false, xerrors.Errorf("no peer id found for PDP") + } + + if retv.AnnouncePiece { + pinfo := types.PdpIpniContext{ + PieceCID: pcid2, + Payload: false, + } + ctxB, err := pinfo.Marshal() + if err != nil { + return false, xerrors.Errorf("failed to marshal pdp context for piece %s: %w", pcid2, err) + } + p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + _, err = tx.Exec(`SELECT insert_pdp_ipni_task($1, $2, $3, $4, $5)`, ctxB, true, lid.String(), peer, id) + if err != nil { + if harmonydb.IsErrUniqueContraint(err) { + log.Infof("Another IPNI announce task already present for piece %s in deal %s", pcid2, UUID) + return false, nil + } + if strings.Contains(err.Error(), "already published") { + log.Infof("Piece %s in deal %s is already published", pcid2, UUID) + return false, nil + } + return false, xerrors.Errorf("updating IPNI announcing task id: %w", err) + } + // Fix the harmony_task.name + n, err := tx.Exec(`UPDATE harmony_task SET name = $1 WHERE id = $2`, pdpIpniName, id) + if err != nil { + return false, xerrors.Errorf("failed to update task name: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("failed to update task name: %d rows updated", n) + } + return true, nil + }) + + } + if retv.AnnouncePayload { + pinfo := types.PdpIpniContext{ + PieceCID: pcid2, + Payload: true, + } + ctxB, err := pinfo.Marshal() + if err != nil { + return false, xerrors.Errorf("failed to marshal pdp context for piece %s: %w", pcid2, err) + } + + p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + _, err = tx.Exec(`SELECT insert_pdp_ipni_task($1, $2, $3, $4, $5)`, ctxB, true, lid.String(), peer, id) + if err != nil { + if harmonydb.IsErrUniqueContraint(err) { + log.Infof("Another IPNI announce task already present for piece %s in deal %s", pcid2, UUID) + return false, nil + } + if strings.Contains(err.Error(), "already published") { + log.Infof("Piece %s in deal %s is already published", pcid2, UUID) + return false, nil + } + return false, xerrors.Errorf("updating IPNI announcing task id: %w", err) + } + // Fix the harmony_task.name + n, err := tx.Exec(`UPDATE harmony_task SET name = $1 WHERE id = $2`, pdpIpniName, id) + if err != nil { + return false, xerrors.Errorf("failed to update task name: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("failed to update task name: %d rows updated", n) + } + return true, nil + }) + } + } else { + // This is a PoRep clean up + pinfo := abi.PieceInfo{ + PieceCID: pi.PieceCIDV1, + Size: pi.Size, + } + b := new(bytes.Buffer) + err = pinfo.MarshalCBOR(b) + if err != nil { + return false, xerrors.Errorf("marshaling piece info: %w", err) + } + + var peer string + err = p.db.QueryRow(ctx, `SELECT peer_id FROM ipni_peerid WHERE sp_id = $1`, pieceDeal.SPID).Scan(&peer) + if err != nil { + return false, xerrors.Errorf("failed to get peer id for provider: %w", err) + } + + if peer == "" { + return false, xerrors.Errorf("no peer id found for sp_id %d", pieceDeal.SPID) + } + + p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + _, err = tx.Exec(`SELECT insert_ipni_task($1, $2, $3, $4, $5, $6, $7, $8, $9)`, lid.String(), pieceDeal.SPID, pieceDeal.Sector, 0, 0, b.Bytes(), true, peer, id) + if err != nil { + if harmonydb.IsErrUniqueContraint(err) { + log.Infof("Another IPNI announce task already present for piece %s in deal %s", pcid2, UUID) + return false, nil + } + if strings.Contains(err.Error(), "already published") { + log.Infof("Piece %s in deal %s is already published", pcid2, UUID) + return false, nil + } + return false, xerrors.Errorf("updating IPNI announcing task id: %w", err) + } + // Fix the harmony_task.name + n, err := tx.Exec(`UPDATE harmony_task SET name = $1 WHERE id = $2`, poRepIpniName, id) + if err != nil { + return false, xerrors.Errorf("failed to update task name: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("failed to update task name: %d rows updated", n) + } + return true, nil + }) + } + } else { + return false, xerrors.Errorf("failed to parse piece deal ID %s: %w", pieceDeal.ID, err) + } + } + } else { + // If we have multiple rows + var mk12List []uuid.UUID + var mk20List []ulid.ULID + var pieceDeal pd + for _, pDeal := range pieceDeals { + if pDeal.ID == task.ID { + pieceDeal = pDeal + } + + uid, err := uuid.Parse(pDeal.ID) + if err == nil { + mk12List = append(mk12List, uid) + continue + } + lid, serr := ulid.Parse(pDeal.ID) + if serr == nil { + mk20List = append(mk20List, lid) + continue + } + return false, xerrors.Errorf("failed to parse piece deal ID %s: %w, %w", pieceDeal.ID, err, serr) + + } + toRM = &pieceDeal + lo.Uniq(mk12List) + lo.Uniq(mk20List) + if isMK12 { + rmAccounce := true + if len(mk12List) > 1 { + // Don't drop index or publish removal we have same piece in another deal + dropIndex = false + rmAccounce = false + } + if len(mk20List) > 0 { + for _, d := range mk20List { + deal, err := mk20.DealFromDB(ctx, p.db, d) + if err != nil { + return false, xerrors.Errorf("failed to get deal for id %s: %w", d, err) + } + if deal.Products.RetrievalV1 == nil { + continue + } + retv := deal.Products.RetrievalV1 + if retv.Indexing { + dropIndex = false + } + if retv.AnnouncePayload { + // No need to publish rm Ad as another MK20 deal is paying for it + rmAccounce = false + break + } + } + } + if rmAccounce { + pinfo := abi.PieceInfo{ + PieceCID: pi.PieceCIDV1, + Size: pi.Size, + } + b := new(bytes.Buffer) + err = pinfo.MarshalCBOR(b) + if err != nil { + return false, xerrors.Errorf("marshaling piece info: %w", err) + } + + p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + var peer string + err = tx.QueryRow(`SELECT peer_id FROM ipni_peerid WHERE sp_id = $1`, pieceDeal.SPID).Scan(&peer) + if err != nil { + return false, xerrors.Errorf("failed to get peer id for provider: %w", err) + } + + if peer == "" { + return false, xerrors.Errorf("no peer id found for sp_id %d", pieceDeal.SPID) + } + + _, err = tx.Exec(`SELECT insert_ipni_task($1, $2, $3, $4, $5, $6, $7, $8, $9)`, pieceDeal.ID, pieceDeal.SPID, pieceDeal.Sector, 0, 0, b.Bytes(), true, peer, id) + if err != nil { + if harmonydb.IsErrUniqueContraint(err) { + log.Infof("Another IPNI announce task already present for piece %s in deal %s", pcid2, pieceDeal.ID) + return false, nil + } + if strings.Contains(err.Error(), "already published") { + log.Infof("Piece %s in deal %s is already published", pcid2, pieceDeal.ID) + return false, nil + } + // Fix the harmony_task.name + n, err := tx.Exec(`UPDATE harmony_task SET name = $1 WHERE id = $2`, poRepIpniName, id) + if err != nil { + return false, xerrors.Errorf("failed to update task name: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("failed to update task name: %d rows updated", n) + } + return false, xerrors.Errorf("updating IPNI announcing task id: %w", err) + } + return true, nil + }) + } + } + + if isMK20 { + rmAccounce := true + rmPiece := true + if len(mk12List) > 1 { + // Don't drop index or publish removal we have same piece in another deal + dropIndex = false + rmAccounce = false + } + if len(mk20List) > 0 { + for _, d := range mk20List { + deal, err := mk20.DealFromDB(ctx, p.db, d) + if err != nil { + return false, xerrors.Errorf("failed to get deal for id %s: %w", d, err) + } + if deal.Products.RetrievalV1 == nil { + continue + } + retv := deal.Products.RetrievalV1 + + // For the deal we are processing + if d.String() == task.ID { + // If we are cleaning up PDP then check PoRep + if task.PDP { + if deal.Products.DDOV1 != nil { + rmAccounce = false + } + } else { + // If we are cleaning up PoRep then check PDP + if deal.Products.PDPV1 != nil { + rmPiece = false + } + if retv.AnnouncePayload { + rmAccounce = false + } + } + } else { + if retv.AnnouncePiece { + rmPiece = false + } + if retv.AnnouncePayload { + rmAccounce = false + } + } + } + } + + if task.PDP { + var peer string + err = p.db.QueryRow(ctx, `SELECT peer_id FROM ipni_peerid WHERE sp_id = $1`, -1).Scan(&peer) + if err != nil { + return false, xerrors.Errorf("failed to get peer id for PDP provider: %w", err) + } + + if peer == "" { + return false, xerrors.Errorf("no peer id found for PDP") + } + + if rmAccounce { + pinfo := types.PdpIpniContext{ + PieceCID: pcid2, + Payload: true, + } + ctxB, err := pinfo.Marshal() + if err != nil { + return false, xerrors.Errorf("failed to marshal pdp context for piece %s: %w", pcid2, err) + } + + p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + _, err = tx.Exec(`SELECT insert_pdp_ipni_task($1, $2, $3, $4, $5)`, ctxB, true, task.ID, peer, id) + if err != nil { + if harmonydb.IsErrUniqueContraint(err) { + log.Infof("Another IPNI announce task already present for piece %s in deal %s", pcid2, task.ID) + return false, nil + } + if strings.Contains(err.Error(), "already published") { + log.Infof("Piece %s in deal %s is already published", pcid2, task.ID) + return false, nil + } + return false, xerrors.Errorf("failed to publish remove payload ad for piece %s in PDP: %w", pcid2, err) + } + // Fix the harmony_task.name + n, err := tx.Exec(`UPDATE harmony_task SET name = $1 WHERE id = $2`, pdpIpniName, id) + if err != nil { + return false, xerrors.Errorf("failed to update task name: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("failed to update task name: %d rows updated", n) + } + return true, nil + }) + } + + if rmPiece { + pinfo := types.PdpIpniContext{ + PieceCID: pcid2, + Payload: false, + } + ctxB, err := pinfo.Marshal() + if err != nil { + return false, xerrors.Errorf("failed to marshal pdp context for piece %s: %w", pcid2, err) + } + + p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + _, err = tx.Exec(`SELECT insert_pdp_ipni_task($1, $2, $3, $4, $5)`, ctxB, true, task.ID, peer, id) + if err != nil { + if harmonydb.IsErrUniqueContraint(err) { + log.Infof("Another IPNI announce task already present for piece %s in deal %s", pcid2, task.ID) + return false, nil + } + if strings.Contains(err.Error(), "already published") { + log.Infof("Piece %s in deal %s is already published", pcid2, task.ID) + return false, nil + } + return false, xerrors.Errorf("failed to publish remove piece ad for piece %s in PDP: %w", pcid2, err) + } + // Fix the harmony_task.name + n, err := tx.Exec(`UPDATE harmony_task SET name = $1 WHERE id = $2`, pdpIpniName, id) + if err != nil { + return false, xerrors.Errorf("failed to update task name: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("failed to update task name: %d rows updated", n) + } + return true, nil + }) + } + } else { + pinfo := abi.PieceInfo{ + PieceCID: pi.PieceCIDV1, + Size: pi.Size, + } + b := new(bytes.Buffer) + err = pinfo.MarshalCBOR(b) + if err != nil { + return false, xerrors.Errorf("marshaling piece info: %w", err) + } + + p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + var peer string + err = tx.QueryRow(`SELECT peer_id FROM ipni_peerid WHERE sp_id = $1`, pieceDeal.SPID).Scan(&peer) + if err != nil { + return false, xerrors.Errorf("failed to get peer id for provider: %w", err) + } + + if peer == "" { + return false, xerrors.Errorf("no peer id found for sp_id %d", pieceDeal.SPID) + } + + _, err = tx.Exec(`SELECT insert_ipni_task($1, $2, $3, $4, $5, $6, $7, $8, $9)`, pieceDeal.ID, pieceDeal.SPID, pieceDeal.Sector, 0, 0, b.Bytes(), true, peer, id) + if err != nil { + if harmonydb.IsErrUniqueContraint(err) { + log.Infof("Another IPNI announce task already present for piece %s in deal %s", pcid2, pieceDeal.ID) + return false, nil + } + if strings.Contains(err.Error(), "already published") { + log.Infof("Piece %s in deal %s is already published", pcid2, pieceDeal.ID) + return false, nil + } + return false, xerrors.Errorf("updating IPNI announcing task id: %w", err) + } + // Fix the harmony_task.name + n, err := tx.Exec(`UPDATE harmony_task SET name = $1 WHERE id = $2`, poRepIpniName, id) + if err != nil { + return false, xerrors.Errorf("failed to update task name: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("failed to update task name: %d rows updated", n) + } + return true, nil + }) + } + } + } + + if dropIndex { + err = dropIndexes(ctx, p.indexStore, pcid2) + if err != nil { + return false, xerrors.Errorf("failed to drop indexes for piece %s: %w", pcid2, err) + } + err = dropAggregateIndex(ctx, p.indexStore, pcid2) + if err != nil { + return false, xerrors.Errorf("failed to drop aggregate index for piece %s: %w", pcid2, err) + } + } + + if task.PDP { + _, err = p.db.Exec(ctx, `SELECT remove_piece_deal($1, $2, $3, $4)`, task.ID, -1, pi.PieceCIDV1.String(), pi.Size) + } else { + _, err = p.db.Exec(ctx, `SELECT remove_piece_deal($1, $2, $3, $4)`, task.ID, toRM.SPID, pi.PieceCIDV1.String(), pi.Size) + } + + if err != nil { + return false, xerrors.Errorf("failed to remove piece deal: %w", err) + } + + _, err = p.db.Exec(ctx, `DELETE FROM piece_cleanup WHERE task_id = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to remove piece cleanup task: %w", err) + } + + return true, nil +} + +func dropIndexes(ctx context.Context, indexStore *indexstore.IndexStore, pieceCid cid.Cid) error { + err := indexStore.RemoveIndexes(ctx, pieceCid) + if err != nil { + return xerrors.Errorf("failed to remove indexes for piece %s: %w", pieceCid, err) + } + return nil +} + +func dropAggregateIndex(ctx context.Context, indexStore *indexstore.IndexStore, pieceCid cid.Cid) error { + err := indexStore.RemoveAggregateIndex(ctx, pieceCid) + if err != nil { + return xerrors.Errorf("failed to remove aggregate index for piece %s: %w", pieceCid, err) + } + return nil +} + +func (p *PieceCleanupTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + return &ids[0], nil +} + +func (p *PieceCleanupTask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(50), + Name: "PieceCleanup", + Cost: resources.Resources{ + Cpu: 1, + Ram: 64 << 20, + }, + MaxFailures: 3, + IAmBored: passcall.Every(5*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return p.schedule(context.Background(), taskFunc) + }), + } +} + +func (p *PieceCleanupTask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var did string + var pdp bool + err := tx.QueryRow(`SELECT id, pdp FROM piece_cleanup + WHERE task_id IS NULL + LIMIT 1`).Scan(&did, &pdp) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, nil + } + return false, xerrors.Errorf("failed to query piece_cleanup: %w", err) + } + + _, err = tx.Exec(`UPDATE piece_cleanup SET task_id = $1 WHERE id = $2 AND pdp = $3`, id, did, pdp) + if err != nil { + return false, xerrors.Errorf("failed to update piece_cleanup: %w", err) + } + + stop = false // we found a task to schedule, keep going + return true, nil + }) + + } + + return nil +} + +func (p *PieceCleanupTask) Adder(taskFunc harmonytask.AddTaskFunc) { + p.TF.Set(taskFunc) +} + +var _ harmonytask.TaskInterface = &PieceCleanupTask{} +var _ = harmonytask.Reg(&PieceCleanupTask{}) diff --git a/tasks/indexing/task_check_indexes.go b/tasks/indexing/task_check_indexes.go index eb47ecdfe..c92363cfe 100644 --- a/tasks/indexing/task_check_indexes.go +++ b/tasks/indexing/task_check_indexes.go @@ -3,21 +3,29 @@ package indexing import ( "bytes" "context" + "database/sql" + "fmt" + "net/url" "time" + "github.com/google/uuid" "github.com/ipfs/go-cid" + "github.com/oklog/ulid" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/lib/commcidv2" "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/market/indexstore" + "github.com/filecoin-project/curio/market/mk20" ) -const CheckIndexInterval = 9 * time.Minute +const CheckIndexInterval = time.Hour * 6 var MaxOngoingIndexingTasks = 40 @@ -59,41 +67,59 @@ func (c *CheckIndexesTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) return false, xerrors.Errorf("checking IPNI: %w", err) } + err = c.checkIPNIMK20(ctx, taskID) + if err != nil { + return false, xerrors.Errorf("checking IPNI for MK20 deals: %w", err) + } + return true, nil } func (c *CheckIndexesTask) checkIndexing(ctx context.Context, taskID harmonytask.TaskID) error { type checkEntry struct { - PieceCid string `db:"piece_cid"` - PieceLen int64 `db:"piece_length"` - PieceOff int64 `db:"piece_offset"` - SPID int64 `db:"sp_id"` - SectorID int64 `db:"sector_num"` - RawSize int64 `db:"raw_size"` + ID string `db:"id"` + PieceCid string `db:"piece_cid"` + PieceLen int64 `db:"piece_length"` + PieceOff int64 `db:"piece_offset"` + SPID int64 `db:"sp_id"` + SectorID int64 `db:"sector_num"` + RawSize int64 `db:"raw_size"` + PieceRef sql.NullInt64 `db:"piece_ref"` } var toCheckList []checkEntry err := c.db.Select(ctx, &toCheckList, ` - SELECT mm.piece_cid, mpd.piece_length, mpd.piece_offset, mpd.sp_id, mpd.sector_num, mpd.raw_size + SELECT mm.piece_cid, mpd.piece_length, mpd.piece_offset, mpd.sp_id, mpd.sector_num, mpd.raw_size, mpd.piece_ref, mpd.id FROM market_piece_metadata mm - LEFT JOIN market_piece_deal mpd ON mm.piece_cid = mpd.piece_cid - WHERE mm.indexed = true + LEFT JOIN market_piece_deal mpd ON mm.piece_cid = mpd.piece_cid AND mm.piece_size = mpd.piece_length + WHERE mm.indexed = true AND mpd.sp_id > 0 AND mpd.sector_num > 0 `) if err != nil { return err } - toCheck := make(map[string][]checkEntry) + toCheck := make(map[abi.PieceInfo][]checkEntry) for _, e := range toCheckList { - toCheck[e.PieceCid] = append(toCheck[e.PieceCid], e) + pCid, err := cid.Parse(e.PieceCid) + if err != nil { + return xerrors.Errorf("parsing piece cid: %w", err) + } + pi := abi.PieceInfo{PieceCID: pCid, Size: abi.PaddedPieceSize(e.PieceLen)} + toCheck[pi] = append(toCheck[pi], e) } // Check the number of ongoing indexing tasks var ongoingIndexingTasks int64 - err = c.db.QueryRow(ctx, ` - SELECT COUNT(*) - FROM market_mk12_deal_pipeline - WHERE indexing_created_at IS NOT NULL AND indexed = false - `).Scan(&ongoingIndexingTasks) + err = c.db.QueryRow(ctx, `SELECT + ( + SELECT COUNT(*) + FROM market_mk12_deal_pipeline + WHERE indexing_created_at IS NOT NULL AND indexed = false + ) + + ( + SELECT COUNT(*) + FROM market_mk20_pipeline + WHERE indexing_created_at IS NOT NULL AND indexed = false + ) AS total_pending_indexing;`).Scan(&ongoingIndexingTasks) if err != nil { return xerrors.Errorf("counting ongoing indexing tasks: %w", err) } @@ -104,13 +130,13 @@ func (c *CheckIndexesTask) checkIndexing(ctx context.Context, taskID harmonytask var have, missing int64 - for p, cent := range toCheck { - pieceCid, err := cid.Parse(p) + for p, cents := range toCheck { + pieceCid, err := commcidv2.PieceCidV2FromV1(p.PieceCID, uint64(cents[0].RawSize)) if err != nil { - return xerrors.Errorf("parsing piece cid: %w", err) + return xerrors.Errorf("getting piece commP: %w", err) } - // Check if the piece is present in the index store + // Check if the pieceV2 is present in the index store hasEnt, err := c.indexStore.CheckHasPiece(ctx, pieceCid) if err != nil { return xerrors.Errorf("getting piece hash range: %w", err) @@ -121,130 +147,221 @@ func (c *CheckIndexesTask) checkIndexing(ctx context.Context, taskID harmonytask continue } - // Index not present, flag for repair - missing++ - log.Warnw("piece missing in indexstore", "piece", pieceCid, "task", taskID) - - var uuids []struct { - DealUUID string `db:"uuid"` - } - err = c.db.Select(ctx, &uuids, ` - SELECT uuid - FROM market_mk12_deals - WHERE piece_cid = $1 - `, pieceCid.String()) - if err != nil { - return xerrors.Errorf("getting deal uuids: %w", err) - } - if len(uuids) == 0 { - log.Warnw("no deals for unindexed piece", "piece", pieceCid, "task", taskID) - continue - } - - // Check the number of ongoing indexing tasks again - err = c.db.QueryRow(ctx, ` - SELECT COUNT(*) - FROM market_mk12_deal_pipeline - WHERE indexing_created_at IS NOT NULL AND indexed = false - `).Scan(&ongoingIndexingTasks) - if err != nil { - return xerrors.Errorf("counting ongoing indexing tasks: %w", err) - } - if ongoingIndexingTasks >= int64(MaxOngoingIndexingTasks) { - log.Warnw("too many ongoing indexing tasks, stopping processing missing pieces", "task", taskID, "ongoing", ongoingIndexingTasks) - break - } - - // Collect deal UUIDs - dealUUIDs := make([]string, 0, len(uuids)) - for _, u := range uuids { - dealUUIDs = append(dealUUIDs, u.DealUUID) - } - - // Get deal details from market_mk12_deals - var deals []struct { - UUID string `db:"uuid"` - SPID int64 `db:"sp_id"` - PieceCID string `db:"piece_cid"` - PieceSize int64 `db:"piece_size"` - Offline bool `db:"offline"` - URL *string `db:"url"` - Headers []byte `db:"url_headers"` - CreatedAt time.Time `db:"created_at"` - } - err = c.db.Select(ctx, &deals, ` - SELECT uuid, sp_id, piece_cid, piece_size, offline, url, url_headers, created_at - FROM market_mk12_deals - WHERE uuid = ANY($1) - `, dealUUIDs) + // Check if the pieceV1 is present in the index store + hasEnt, err = c.indexStore.CheckHasPiece(ctx, p.PieceCID) if err != nil { - return xerrors.Errorf("getting deal details: %w", err) + return xerrors.Errorf("getting piece hash range: %w", err) } - // Use the first deal for processing - deal := deals[0] - - var sourceSector *storiface.SectorRef - var sourceOff, rawSize int64 - for _, entry := range cent { - if entry.SPID != deal.SPID { - continue - } - if sourceSector = c.findSourceSector(ctx, entry.SPID, entry.SectorID); sourceSector == nil { - // No unsealed copy - continue + if hasEnt { + err = c.indexStore.UpdatePieceCidV1ToV2(ctx, p.PieceCID, pieceCid) + if err != nil { + return xerrors.Errorf("updating piece cid v1 to v2: %w", err) } - sourceOff = entry.PieceOff - rawSize = entry.RawSize - break - } - - if sourceSector == nil { - log.Infow("no unsealed copy of sector found for reindexing", "piece", pieceCid, "task", taskID, "deals", len(deals), "have", have, "missing", missing, "ongoing", ongoingIndexingTasks) + log.Infow("piece cid v1 to v2 updated", "piece", p.PieceCID, "task", taskID) + have++ continue } - var added bool - - _, err = c.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - added = false - - // Insert into market_mk12_deal_pipeline - n, err := tx.Exec(` - INSERT INTO market_mk12_deal_pipeline ( - uuid, sp_id, piece_cid, piece_size, raw_size, offline, url, headers, created_at, - sector, sector_offset, reg_seal_proof, - started, after_psd, after_commp, after_find_deal, sealed, complete, - indexed, indexing_created_at, indexing_task_id, should_index - ) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, - true, true, true, true, true, true, - false, NOW(), NULL, true) - ON CONFLICT (uuid) DO NOTHING - `, deal.UUID, deal.SPID, deal.PieceCID, deal.PieceSize, rawSize, deal.Offline, deal.URL, deal.Headers, deal.CreatedAt, - sourceSector.ID.Number, sourceOff, int64(sourceSector.ProofType)) - if err != nil { - return false, xerrors.Errorf("upserting into deal pipeline for uuid %s: %w", deal.UUID, err) - } - if n == 0 { - return false, nil - } - added = true + // Index not present, flag for repair + missing++ + log.Warnw("piece missing in indexstore", "piece", pieceCid, "task", taskID) - _, err = tx.Exec(`UPDATE market_piece_metadata SET indexed = FALSE WHERE piece_cid = $1`, pieceCid.String()) + for _, cent := range cents { + var isMK12 bool + var id ulid.ULID + + id, err := ulid.Parse(cent.ID) if err != nil { - return false, xerrors.Errorf("updating market_piece_metadata.indexed column: %w", err) + serr := err + _, err = uuid.Parse(cent.ID) + if err != nil { + return xerrors.Errorf("parsing deal id: %w, %w", serr, err) + } + isMK12 = true } - return true, nil - }, harmonydb.OptionRetry()) - if err != nil { - return xerrors.Errorf("inserting into market_mk12_deal_pipeline: %w", err) - } + var scheduled bool + + if isMK12 { + // Get deal details from market_mk12_deals + var mk12deals []struct { + UUID string `db:"uuid"` + SPID int64 `db:"sp_id"` + PieceCID string `db:"piece_cid"` + PieceSize int64 `db:"piece_size"` + Offline bool `db:"offline"` + CreatedAt time.Time `db:"created_at"` + } + err = c.db.Select(ctx, &mk12deals, `SELECT + uuid, + sp_id, + piece_cid, + piece_size, + offline, + created_at, + FALSE AS ddo + FROM market_mk12_deals + WHERE uuid = $1 + + UNION ALL + + SELECT + uuid, + sp_id, + piece_cid, + piece_size, + TRUE AS offline, + created_at, + TRUE AS ddo + FROM market_direct_deals + WHERE uuid = $1; + `, cent.ID) + if err != nil { + return xerrors.Errorf("getting deal details: %w", err) + } + + if len(mk12deals) == 0 { + log.Warnw("no mk12 deals for unindexed piece", "piece", pieceCid, "task", taskID) + continue + } + + mk12deal := mk12deals[0] + + if cent.PieceRef.Valid { + continue // This is mk20 deal + } + if cent.SPID != mk12deal.SPID { + continue + } + sourceSector := c.findSourceSector(ctx, cent.SPID, cent.SectorID) + if sourceSector == nil { + continue + } + + var added bool + + _, err = c.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + added = false + + // Insert into market_mk12_deal_pipeline + n, err := tx.Exec(` + INSERT INTO market_mk12_deal_pipeline ( + uuid, sp_id, piece_cid, piece_size, raw_size, offline, created_at, + sector, sector_offset, reg_seal_proof, + started, after_psd, after_commp, after_find_deal, sealed, complete, + indexed, indexing_created_at, indexing_task_id, should_index + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, + true, true, true, true, true, true, + false, NOW(), NULL, true) + ON CONFLICT (uuid) DO NOTHING + `, mk12deal.UUID, mk12deal.SPID, mk12deal.PieceCID, mk12deal.PieceSize, cent.RawSize, mk12deal.Offline, mk12deal.CreatedAt, + sourceSector.ID.Number, cent.PieceOff, int64(sourceSector.ProofType)) + if err != nil { + return false, xerrors.Errorf("upserting into deal pipeline for uuid %s: %w", mk12deal.UUID, err) + } + if n == 0 { + return false, nil + } + added = true + + _, err = tx.Exec(`UPDATE market_piece_metadata SET indexed = FALSE WHERE piece_cid = $1 AND piece_size = $2`, p.PieceCID.String(), p.Size) + if err != nil { + return false, xerrors.Errorf("updating market_piece_metadata.indexed column: %w", err) + } + + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return xerrors.Errorf("inserting into market_mk12_deal_pipeline: %w", err) + } + + if added { + log.Infow("added reindexing pipeline entry", "uuid", mk12deal.UUID, "task", taskID, "piece", pieceCid) + ongoingIndexingTasks++ + scheduled = true + } + } else { + if !cent.PieceRef.Valid { + continue + } + + deal, err := mk20.DealFromDB(ctx, c.db, id) + if err != nil { + log.Warnw("failed to get deal from db", "id", id.String(), "task", taskID) + continue + } + + spid, err := address.IDFromAddress(deal.Products.DDOV1.Provider) + if err != nil { + return xerrors.Errorf("parsing provider address: %w", err) + } + + pi, err := deal.PieceInfo() + if err != nil { + return xerrors.Errorf("getting piece info: %w", err) + } + + if uint64(cent.SPID) != spid { + continue + } + + pieceIDUrl := url.URL{ + Scheme: "pieceref", + Opaque: fmt.Sprintf("%d", cent.PieceRef.Int64), + } + + data := deal.Data + ddo := deal.Products.DDOV1 + + aggregation := 0 + if data.Format.Aggregate != nil { + aggregation = int(data.Format.Aggregate.Type) + } + + var added bool + + _, err = c.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`INSERT INTO market_mk20_pipeline ( + id, sp_id, contract, client, piece_cid_v2, piece_cid, piece_size, raw_size, + offline, url, indexing, announce, duration, piece_aggregation, + started, downloaded, after_commp, aggregated, sector, reg_seal_proof, sector_offset, sealed, + indexing_created_at, complete) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, + TRUE, TRUE, TRUE, TRUE, $15, 0, $16, TRUE, NOW(), TRUE)`, // We can use reg_seal_proof = 0 because process_piece_deal will prevent new entry from being created + deal.Identifier.String(), spid, ddo.ContractAddress, deal.Client, deal.Data.PieceCID.String(), pi.PieceCIDV1.String(), pi.Size, int64(pi.RawSize), + false, pieceIDUrl.String(), true, false, ddo.Duration, aggregation, + cent.SectorID, cent.PieceOff) + if err != nil { + return false, xerrors.Errorf("inserting mk20 pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("inserting mk20 pipeline: %d rows affected", n) + } + + added = true + + _, err = tx.Exec(`UPDATE market_piece_metadata SET indexed = FALSE WHERE piece_cid = $1 AND piece_size = $2`, p.PieceCID.String(), p.Size) + if err != nil { + return false, xerrors.Errorf("updating market_piece_metadata.indexed column: %w", err) + } + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return xerrors.Errorf("inserting into market_mk20_pipeline: %w", err) + } + + if added { + log.Infow("added reindexing pipeline entry", "id", id, "task", taskID, "piece", pieceCid) + ongoingIndexingTasks++ + scheduled = true + } + } - if added { - log.Infow("added reindexing pipeline entry", "uuid", deal.UUID, "task", taskID, "piece", deal.PieceCID) - ongoingIndexingTasks++ + if scheduled { + break // Break out of PieceDeal loop + } } if ongoingIndexingTasks >= int64(MaxOngoingIndexingTasks) { @@ -260,8 +377,9 @@ func (c *CheckIndexesTask) checkIndexing(ctx context.Context, taskID harmonytask func (c *CheckIndexesTask) checkIPNI(ctx context.Context, taskID harmonytask.TaskID) (err error) { type pieceSP struct { - PieceCid string `db:"piece_cid"` - SpID int64 `db:"sp_id"` + PieceCid string `db:"piece_cid"` + PieceSize abi.PaddedPieceSize `db:"piece_size"` + SpID int64 `db:"sp_id"` } // get candidates to check @@ -301,7 +419,7 @@ func (c *CheckIndexesTask) checkIPNI(ctx context.Context, taskID harmonytask.Tas // get already running pipelines with announce=true var announcePiecePipelines []pieceSP - err = c.db.Select(ctx, &announcePiecePipelines, `SELECT piece_cid, sp_id FROM market_mk12_deal_pipeline WHERE announce=true`) + err = c.db.Select(ctx, &announcePiecePipelines, `SELECT piece_cid, piece_size, sp_id FROM market_mk12_deal_pipeline WHERE announce=true`) if err != nil { return xerrors.Errorf("getting ipni tasks: %w", err) } @@ -322,13 +440,13 @@ func (c *CheckIndexesTask) checkIPNI(ctx context.Context, taskID harmonytask.Tas return nil } - var have, missisg, issues int64 + var have, missing, issues int64 defer func() { - log.Infow("IPNI Ad check", "have", have, "missisg", missisg, "issues", issues, "err", err) + log.Infow("IPNI Ad check", "have", have, "missing", missing, "issues", issues, "err", err) }() for _, deal := range toCheck { - if _, ok := announcablePipelines[pieceSP{deal.PieceCID, deal.SpID}]; ok { + if _, ok := announcablePipelines[pieceSP{deal.PieceCID, deal.PieceSize, deal.SpID}]; ok { // pipeline for the piece already running have++ continue @@ -388,7 +506,7 @@ func (c *CheckIndexesTask) checkIPNI(ctx context.Context, taskID harmonytask.Tas PieceOffset int64 `db:"piece_offset"` RawSize int64 `db:"raw_size"` } - err = c.db.Select(ctx, &sourceSector, `SELECT sector_num, piece_offset, raw_size FROM market_piece_deal WHERE piece_cid=$1 AND sp_id = $2`, deal.PieceCID, deal.SpID) + err = c.db.Select(ctx, &sourceSector, `SELECT sector_num, piece_offset, raw_size FROM market_piece_deal WHERE piece_cid=$1 AND piece_length = $2 AND sp_id = $3`, deal.PieceCID, deal.PieceSize, deal.SpID) if err != nil { return xerrors.Errorf("getting source sector: %w", err) } @@ -415,7 +533,7 @@ func (c *CheckIndexesTask) checkIPNI(ctx context.Context, taskID harmonytask.Tas continue } - missisg++ + missing++ n, err := c.db.Exec(ctx, ` INSERT INTO market_mk12_deal_pipeline ( @@ -449,6 +567,184 @@ func (c *CheckIndexesTask) checkIPNI(ctx context.Context, taskID harmonytask.Tas return nil } +func (c *CheckIndexesTask) checkIPNIMK20(ctx context.Context, taskID harmonytask.TaskID) (err error) { + var ids []struct { + ID string `db:"id"` + } + + err = c.db.Select(ctx, &ids, `SELECT m.id + FROM market_mk20_deal AS m + LEFT JOIN ipni AS i + ON m.piece_cid_v2 = i.piece_cid_v2 + LEFT JOIN market_mk20_pipeline AS p + ON m.id = p.id + LEFT JOIN market_mk20_pipeline_waiting AS w + ON m.id = w.id + WHERE m.piece_cid_v2 IS NOT NULL + AND m.ddo_v1 IS NOT NULL + AND m.ddo_v1 != 'null' + AND (m.retrieval_v1->>'announce_payload')::boolean = TRUE + AND i.piece_cid_v2 IS NULL + AND p.id IS NULL + AND w.id IS NULL;`) + if err != nil { + return xerrors.Errorf("getting mk20 deals which are not announced: %w", err) + } + + if len(ids) == 0 { + return nil + } + + var ipniPeerIDs []struct { + SpID int64 `db:"sp_id"` + PeerID string `db:"peer_id"` + } + err = c.db.Select(ctx, &ipniPeerIDs, `SELECT sp_id, peer_id FROM ipni_peerid`) + if err != nil { + return xerrors.Errorf("getting ipni tasks: %w", err) + } + + spToPeer := map[int64]string{} + for _, d := range ipniPeerIDs { + spToPeer[d.SpID] = d.PeerID + } + + var ongoingIpniTasks int64 + err = c.db.QueryRow(ctx, `SELECT COUNT(1) FROM ipni_task`).Scan(&ongoingIpniTasks) + if err != nil { + return xerrors.Errorf("getting ipni tasks: %w", err) + } + if ongoingIpniTasks >= int64(MaxOngoingIndexingTasks) { + log.Debugw("too many ongoing ipni tasks, skipping ipni index checks", "task", taskID, "ongoing", ongoingIpniTasks) + return nil + } + + var have, missing, issues int64 + for _, i := range ids { + id, err := ulid.Parse(i.ID) + if err != nil { + return xerrors.Errorf("parsing deal id: %w", err) + } + + deal, err := mk20.DealFromDB(ctx, c.db, id) + if err != nil { + return xerrors.Errorf("getting deal from db: %w", err) + } + + spid, err := address.IDFromAddress(deal.Products.DDOV1.Provider) + if err != nil { + return xerrors.Errorf("parsing provider address: %w", err) + } + + pinfo, err := deal.PieceInfo() + if err != nil { + return xerrors.Errorf("getting piece info: %w", err) + } + + pi := abi.PieceInfo{ + PieceCID: pinfo.PieceCIDV1, + Size: pinfo.Size, + } + + var ctxIdBuf bytes.Buffer + err = pi.MarshalCBOR(&ctxIdBuf) + if err != nil { + return xerrors.Errorf("marshaling piece info: %w", err) + } + + ctxId := ctxIdBuf.Bytes() + + provider, ok := spToPeer[int64(spid)] + if !ok { + issues++ + log.Warnw("no peer id for spid", "spid", spid, "checkPiece", deal.Data.PieceCID.String()) + continue + } + + var hasEnt int64 + err = c.db.QueryRow(ctx, `SELECT count(1) FROM ipni WHERE context_id=$1 AND provider=$2`, ctxId, provider).Scan(&hasEnt) + if err != nil { + return xerrors.Errorf("getting piece hash range: %w", err) + } + if hasEnt > 0 { + // has the entry + have++ + continue + } + + hasIndex, err := c.indexStore.CheckHasPiece(ctx, deal.Data.PieceCID) + if err != nil { + return xerrors.Errorf("getting piece hash range: %w", err) + } + if !hasIndex { + log.Warnw("no index for piece with missing IPNI Ad", "piece", deal.Data.PieceCID.String(), "checkPiece", pi.PieceCID) + issues++ + continue + } + + var sourceSector []struct { + SectorNum int64 `db:"sector_num"` + PieceOffset int64 `db:"piece_offset"` + RawSize int64 `db:"raw_size"` + PieceRef sql.NullInt64 `db:"piece_ref"` + } + err = c.db.Select(ctx, &sourceSector, `SELECT sector_num, piece_offset, raw_size, piece_ref FROM market_piece_deal WHERE id = $1`, id.String()) + if err != nil { + return xerrors.Errorf("getting source sector: %w", err) + } + if len(sourceSector) == 0 { + log.Warnw("no source sector for piece", "piece", deal.Data.PieceCID.String(), "checkPiece", pi.PieceCID) + issues++ + continue + } + + src := sourceSector[0] + + if !src.PieceRef.Valid { + log.Warnw("no piece ref for ipni reindexing", "piece", pi.PieceCID, "checkPiece", deal.Data.PieceCID.String()) + missing++ + continue + } + + pieceIDUrl := url.URL{ + Scheme: "pieceref", + Opaque: fmt.Sprintf("%d", src.PieceRef.Int64), + } + + data := deal.Data + ddo := deal.Products.DDOV1 + + aggregation := 0 + if data.Format.Aggregate != nil { + aggregation = int(data.Format.Aggregate.Type) + } + + n, err := c.db.Exec(ctx, `INSERT INTO market_mk20_pipeline ( + id, sp_id, contract, client, piece_cid_v2, piece_cid, piece_size, raw_size, + offline, url, indexing, announce, duration, piece_aggregation, + started, downloaded, after_commp, aggregated, sector, reg_seal_proof, sector_offset, sealed, + indexing_created_at, indexed, complete) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, + TRUE, TRUE, TRUE, TRUE, $15, 0, $16, TRUE, NOW(), TRUE, FALSE)`, // We can use reg_seal_proof = 0 because process_piece_deal will prevent new entry from being created + deal.Identifier.String(), spid, ddo.ContractAddress, deal.Client, data.PieceCID.String(), pinfo.PieceCIDV1.String(), pinfo.Size, int64(pinfo.RawSize), + false, pieceIDUrl.String(), true, true, ddo.Duration, aggregation, + src.SectorNum, src.PieceOffset) + if err != nil { + return xerrors.Errorf("inserting mk20 pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("inserting mk20 pipeline: %d rows affected", n) + } + log.Infow("created IPNI reindexing pipeline", "piece", pi.PieceCID, "spid", spid) + ongoingIpniTasks++ + if ongoingIpniTasks >= int64(MaxOngoingIndexingTasks) { + return nil + } + } + + return nil +} + func (c *CheckIndexesTask) findSourceSector(ctx context.Context, spid, sectorNum int64) *storiface.SectorRef { var sourceSector *storiface.SectorRef var qres []struct { @@ -496,7 +792,8 @@ func (c *CheckIndexesTask) TypeDetails() harmonytask.TaskTypeDetails { Gpu: 0, Ram: 32 << 20, }, - IAmBored: harmonytask.SingletonTaskAdder(CheckIndexInterval, c), + IAmBored: harmonytask.SingletonTaskAdder(CheckIndexInterval, c), + MaxFailures: 3, } } diff --git a/tasks/indexing/task_indexing.go b/tasks/indexing/task_indexing.go index d3df8c3fb..4c79df383 100644 --- a/tasks/indexing/task_indexing.go +++ b/tasks/indexing/task_indexing.go @@ -3,18 +3,27 @@ package indexing import ( "bufio" "context" + "database/sql" "errors" "fmt" "io" + "net/url" + "runtime" + "sort" + "strconv" + "sync" "time" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" carv2 "github.com/ipld/go-car/v2" + "github.com/oklog/ulid" "github.com/yugabyte/pgx/v5" "golang.org/x/sync/errgroup" "golang.org/x/xerrors" + "github.com/filecoin-project/go-data-segment/datasegment" + "github.com/filecoin-project/go-data-segment/fr32" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/curio/deps/config" @@ -22,11 +31,14 @@ import ( "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/cachedreader" + "github.com/filecoin-project/curio/lib/commcidv2" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/passcall" "github.com/filecoin-project/curio/lib/pieceprovider" "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/market/indexstore" + "github.com/filecoin-project/curio/market/mk20" ) var log = logging.Logger("indexing") @@ -35,6 +47,7 @@ type IndexingTask struct { db *harmonydb.DB indexStore *indexstore.IndexStore pieceProvider *pieceprovider.SectorReader + cpr *cachedreader.CachedPieceReader sc *ffi.SealCalls cfg *config.CurioConfig insertConcurrency int @@ -42,12 +55,13 @@ type IndexingTask struct { max taskhelp.Limiter } -func NewIndexingTask(db *harmonydb.DB, sc *ffi.SealCalls, indexStore *indexstore.IndexStore, pieceProvider *pieceprovider.SectorReader, cfg *config.CurioConfig, max taskhelp.Limiter) *IndexingTask { +func NewIndexingTask(db *harmonydb.DB, sc *ffi.SealCalls, indexStore *indexstore.IndexStore, pieceProvider *pieceprovider.SectorReader, cpr *cachedreader.CachedPieceReader, cfg *config.CurioConfig, max taskhelp.Limiter) *IndexingTask { return &IndexingTask{ db: db, indexStore: indexStore, pieceProvider: pieceProvider, + cpr: cpr, sc: sc, cfg: cfg, insertConcurrency: cfg.Market.StorageMarketConfig.Indexing.InsertConcurrency, @@ -57,18 +71,22 @@ func NewIndexingTask(db *harmonydb.DB, sc *ffi.SealCalls, indexStore *indexstore } type itask struct { - UUID string `db:"uuid"` - SpID int64 `db:"sp_id"` - Sector abi.SectorNumber `db:"sector"` - Proof abi.RegisteredSealProof `db:"reg_seal_proof"` - PieceCid string `db:"piece_cid"` - Size abi.PaddedPieceSize `db:"piece_size"` - Offset int64 `db:"sector_offset"` - RawSize int64 `db:"raw_size"` - ShouldIndex bool `db:"should_index"` - Announce bool `db:"announce"` - ChainDealId abi.DealID `db:"chain_deal_id"` - IsDDO bool `db:"is_ddo"` + UUID string `db:"uuid"` + SpID int64 `db:"sp_id"` + Sector abi.SectorNumber `db:"sector"` + Proof abi.RegisteredSealProof `db:"reg_seal_proof"` + PieceCid string `db:"piece_cid"` + Size abi.PaddedPieceSize `db:"piece_size"` + Offset int64 `db:"sector_offset"` + RawSize int64 `db:"raw_size"` + Url sql.NullString `db:"url"` + ShouldIndex bool `db:"should_index"` + IndexingCreatedAt time.Time `db:"indexing_created_at"` + Announce bool `db:"announce"` + ChainDealId abi.DealID `db:"chain_deal_id"` + IsDDO bool `db:"is_ddo"` + Mk20 bool `db:"mk20"` + PieceRef int64 } func (i *IndexingTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { @@ -78,29 +96,53 @@ func (i *IndexingTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (do ctx := context.Background() err = i.db.Select(ctx, &tasks, `SELECT - p.uuid, - p.sp_id, - p.sector, - p.piece_cid, - p.piece_size, - p.sector_offset, - p.reg_seal_proof, - p.raw_size, - p.should_index, - p.announce, - p.is_ddo, - COALESCE(d.chain_deal_id, 0) AS chain_deal_id -- If NULL, return 0 + p.uuid, + p.sp_id, + p.sector, + p.piece_cid, + p.piece_size, + p.sector_offset, + p.reg_seal_proof, + p.raw_size, + p.url, + p.should_index, + p.announce, + p.is_ddo, + COALESCE(d.chain_deal_id, 0) AS chain_deal_id, + FALSE AS mk20 FROM - market_mk12_deal_pipeline p + market_mk12_deal_pipeline p LEFT JOIN - market_mk12_deals d - ON p.uuid = d.uuid AND p.sp_id = d.sp_id + market_mk12_deals d + ON p.uuid = d.uuid AND p.sp_id = d.sp_id LEFT JOIN - market_direct_deals md - ON p.uuid = md.uuid AND p.sp_id = md.sp_id + market_direct_deals md + ON p.uuid = md.uuid AND p.sp_id = md.sp_id WHERE - p.indexing_task_id = $1; - ;`, taskID) + p.indexing_task_id = $1 + + UNION ALL + + SELECT + id AS uuid, + sp_id, + sector, + piece_cid, + piece_size, + sector_offset, + reg_seal_proof, + raw_size, + url, + indexing as should_index, + announce, + TRUE AS is_ddo, + 0 AS chain_deal_id, + TRUE AS mk20 + FROM + market_mk20_pipeline p + WHERE + p.indexing_task_id = $1; + `, taskID) if err != nil { return false, xerrors.Errorf("getting indexing params: %w", err) } @@ -113,18 +155,72 @@ func (i *IndexingTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (do // Check if piece is already indexed var indexed bool - err = i.db.QueryRow(ctx, `SELECT indexed FROM market_piece_metadata WHERE piece_cid = $1`, task.PieceCid).Scan(&indexed) + err = i.db.QueryRow(ctx, `SELECT indexed FROM market_piece_metadata WHERE piece_cid = $1 and piece_size = $2`, task.PieceCid, task.Size).Scan(&indexed) if err != nil && !errors.Is(err, pgx.ErrNoRows) { return false, xerrors.Errorf("checking if piece %s is already indexed: %w", task.PieceCid, err) } + var byteData bool + var subPieces []mk20.DataSource + + if task.Mk20 { + id, err := ulid.Parse(task.UUID) + if err != nil { + return false, xerrors.Errorf("parsing id: %w", err) + } + deal, err := mk20.DealFromDB(ctx, i.db, id) + if err != nil { + return false, xerrors.Errorf("getting mk20 deal from DB: %w", err) + } + if deal.Data.Format.Aggregate != nil { + if deal.Data.Format.Aggregate.Type > 0 { + var found bool + if len(deal.Data.Format.Aggregate.Sub) > 0 { + subPieces = deal.Data.Format.Aggregate.Sub + found = true + } + if len(deal.Data.SourceAggregate.Pieces) > 0 { + subPieces = deal.Data.SourceAggregate.Pieces + found = true + } + if !found { + return false, xerrors.Errorf("no sub pieces for aggregate mk20 deal") + } + } + } + + if deal.Data.Format.Raw != nil { + byteData = true + } + + if !task.Url.Valid { + return false, xerrors.Errorf("no url for mk20 deal") + } + + url, err := url.Parse(task.Url.String) + if err != nil { + return false, xerrors.Errorf("parsing url: %w", err) + } + + if url.Scheme != "pieceref" { + return false, xerrors.Errorf("invalid url scheme: %s", url.Scheme) + } + + refNum, err := strconv.ParseInt(url.Opaque, 10, 64) + if err != nil { + return false, xerrors.Errorf("parsing piece reference number: %w", err) + } + + task.PieceRef = refNum + } + // Return early if already indexed or should not be indexed - if indexed || !task.ShouldIndex { + if indexed || !task.ShouldIndex || byteData { err = i.recordCompletion(ctx, task, taskID, false) if err != nil { return false, err } - log.Infow("Piece already indexed or should not be indexed", "piece_cid", task.PieceCid, "indexed", indexed, "should_index", task.ShouldIndex, "uuid", task.UUID, "sp_id", task.SpID, "sector", task.Sector) + log.Infow("Piece already indexed or should not be indexed", "piece_cid", task.PieceCid, "indexed", indexed, "should_index", task.ShouldIndex, "id", task.UUID, "sp_id", task.SpID, "sector", task.Sector) return true, nil } @@ -134,16 +230,31 @@ func (i *IndexingTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (do return false, xerrors.Errorf("parsing piece CID: %w", err) } - reader, err := i.pieceProvider.ReadPiece(ctx, storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(task.SpID), - Number: task.Sector, - }, - ProofType: task.Proof, - }, storiface.PaddedByteIndex(task.Offset).Unpadded(), task.Size.Unpadded(), pieceCid) - + pc2, err := commcidv2.PieceCidV2FromV1(pieceCid, uint64(task.RawSize)) if err != nil { - return false, xerrors.Errorf("getting piece reader: %w", err) + return false, xerrors.Errorf("getting piece commP: %w", err) + } + + var reader storiface.Reader + + if task.Mk20 { + reader, _, err = i.cpr.GetSharedPieceReader(ctx, pc2) + + if err != nil { + return false, xerrors.Errorf("getting piece reader: %w", err) + } + } else { + reader, err = i.pieceProvider.ReadPiece(ctx, storiface.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(task.SpID), + Number: task.Sector, + }, + ProofType: task.Proof, + }, storiface.PaddedByteIndex(task.Offset).Unpadded(), task.Size.Unpadded(), pieceCid) + + if err != nil { + return false, xerrors.Errorf("getting piece reader: %w", err) + } } defer func() { @@ -156,33 +267,191 @@ func (i *IndexingTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (do chanSize := dealCfg.Indexing.InsertConcurrency * dealCfg.Indexing.InsertBatchSize recs := make(chan indexstore.Record, chanSize) - - //recs := make([]indexstore.Record, 0, chanSize) - opts := []carv2.Option{carv2.ZeroLengthSectionAsEOF(true)} - blockReader, err := carv2.NewBlockReader(bufio.NewReaderSize(reader, 4<<20), opts...) - if err != nil { - return false, fmt.Errorf("getting block reader over piece: %w", err) - } + var blocks int64 var eg errgroup.Group addFail := make(chan struct{}) var interrupted bool - var blocks int64 - start := time.Now() eg.Go(func() error { defer close(addFail) + return i.indexStore.AddIndex(ctx, pc2, recs) + }) + + var aggidx map[cid.Cid][]indexstore.Record + + if task.Mk20 && len(subPieces) > 0 { + blocks, aggidx, interrupted, err = IndexAggregate(pc2, reader, task.Size, subPieces, recs, addFail) + } else { + blocks, interrupted, err = IndexCAR(reader, 4<<20, recs, addFail) + } + + if err != nil { + // Indexing itself failed, stop early + close(recs) // still safe to close, AddIndex will exit on channel close + // wait for AddIndex goroutine to finish cleanly + _ = eg.Wait() + return false, xerrors.Errorf("indexing failed: %w", err) + } + + // Close the channel + close(recs) + + // Wait till AddIndex is finished + err = eg.Wait() + if err != nil { + return false, xerrors.Errorf("adding index to DB (interrupted %t): %w", interrupted, err) + } + + log.Infof("Indexing deal %s took %0.3f seconds", task.UUID, time.Since(startTime).Seconds()) + + // Save aggregate index if present + for k, v := range aggidx { + if len(v) > 0 { + err = i.indexStore.InsertAggregateIndex(ctx, k, v) + if err != nil { + return false, xerrors.Errorf("inserting aggregate index: %w", err) + } + } + } + + err = i.recordCompletion(ctx, task, taskID, true) + if err != nil { + return false, err + } + + blocksPerSecond := float64(blocks) / time.Since(startTime).Seconds() + log.Infow("Piece indexed", "piece_cid", task.PieceCid, "id", task.UUID, "sp_id", task.SpID, "sector", task.Sector, "blocks", blocks, "blocks_per_second", blocksPerSecond) + + return true, nil +} + +// parseDataSegmentIndex is a local more efficient alternative to the method provided by the datasegment library +func parseDataSegmentIndex(unpaddedReader io.Reader) (datasegment.IndexData, error) { + const ( + unpaddedChunk = 127 + paddedChunk = 128 + ) + + // Read all unpadded data (up to 32 MiB Max as per FRC for 64 GiB sector) + unpaddedData, err := io.ReadAll(unpaddedReader) + if err != nil { + return datasegment.IndexData{}, xerrors.Errorf("reading unpadded data: %w", err) + } + + // Make sure it's aligned to 127 + if len(unpaddedData)%unpaddedChunk != 0 { + return datasegment.IndexData{}, fmt.Errorf("unpadded data length %d is not a multiple of 127", len(unpaddedData)) + } + numChunks := len(unpaddedData) / unpaddedChunk + + // Prepare padded output buffer + paddedData := make([]byte, numChunks*paddedChunk) + + // Parallel pad + var wg sync.WaitGroup + concurrency := runtime.NumCPU() + chunkPerWorker := (numChunks + concurrency - 1) / concurrency + + for w := 0; w < concurrency; w++ { + start := w * chunkPerWorker + end := (w + 1) * chunkPerWorker + if end > numChunks { + end = numChunks + } + wg.Add(1) + go func(start, end int) { + defer wg.Done() + for i := start; i < end; i++ { + in := unpaddedData[i*unpaddedChunk : (i+1)*unpaddedChunk] + out := paddedData[i*paddedChunk : (i+1)*paddedChunk] + fr32.Pad(in, out) + } + }(start, end) + } + wg.Wait() + + // Decode entries + allEntries := make([]datasegment.SegmentDesc, numChunks*2) + for i := 0; i < numChunks; i++ { + p := paddedData[i*paddedChunk : (i+1)*paddedChunk] + + if err := allEntries[i*2+0].UnmarshalBinary(p[:datasegment.EntrySize]); err != nil { + return datasegment.IndexData{}, xerrors.Errorf("unmarshal entry 1 at chunk %d: %w", i, err) + } + if err := allEntries[i*2+1].UnmarshalBinary(p[datasegment.EntrySize:]); err != nil { + return datasegment.IndexData{}, xerrors.Errorf("unmarshal entry 2 at chunk %d: %w", i, err) + } + } - serr := i.indexStore.AddIndex(ctx, pieceCid, recs) - if serr != nil { - return xerrors.Errorf("adding index to DB: %w", serr) + return datasegment.IndexData{Entries: allEntries}, nil +} + +func validateSegments(segments []datasegment.SegmentDesc) []datasegment.SegmentDesc { + entryCount := len(segments) + + validCh := make(chan datasegment.SegmentDesc, entryCount) + var wg sync.WaitGroup + + workers := runtime.NumCPU() + chunkSize := (entryCount + workers - 1) / workers + + for w := 0; w < workers; w++ { + start := w * chunkSize + end := (w + 1) * chunkSize + if end > entryCount { + end = entryCount + } + if start >= end { + break } - return nil + + wg.Add(1) + go func(start, end int) { + defer wg.Done() + for i := start; i < end; i++ { + entry := segments[i] + if err := entry.Validate(); err == nil { + validCh <- entry + } + log.Debugw("data segment invalid", "segment", entry) + } + }(start, end) + } + + go func() { + wg.Wait() + close(validCh) + }() + + var validEntries []datasegment.SegmentDesc + for entry := range validCh { + validEntries = append(validEntries, entry) + } + sort.Slice(validEntries, func(i, j int) bool { + return validEntries[i].Offset < validEntries[j].Offset }) + return validEntries +} + +func IndexCAR(r io.Reader, buffSize int, recs chan<- indexstore.Record, addFail <-chan struct{}) (int64, bool, error) { + blockReader, err := carv2.NewBlockReader(bufio.NewReaderSize(r, buffSize), carv2.ZeroLengthSectionAsEOF(true)) + if err != nil { + return 0, false, fmt.Errorf("getting block reader over piece: %w", err) + } + + var blocks int64 + var interrupted bool + + for { + blockMetadata, err := blockReader.SkipNext() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return blocks, interrupted, fmt.Errorf("generating index for piece: %w", err) + } - blockMetadata, err := blockReader.SkipNext() -loop: - for err == nil { blocks++ select { @@ -193,63 +462,172 @@ loop: }: case <-addFail: interrupted = true - break loop } - blockMetadata, err = blockReader.SkipNext() - } - if err != nil && !errors.Is(err, io.EOF) { - return false, fmt.Errorf("generating index for piece: %w", err) + + if interrupted { + break + } } - // Close the channel - close(recs) + return blocks, interrupted, nil +} - // Wait till AddIndex is finished - err = eg.Wait() +type IndexReader interface { + io.ReaderAt + io.Seeker + io.Reader +} + +func IndexAggregate(pieceCid cid.Cid, + reader IndexReader, + size abi.PaddedPieceSize, + subPieces []mk20.DataSource, + recs chan<- indexstore.Record, + addFail <-chan struct{}, +) (int64, map[cid.Cid][]indexstore.Record, bool, error) { + dsis := datasegment.DataSegmentIndexStartOffset(size) + if _, err := reader.Seek(int64(dsis), io.SeekStart); err != nil { + return 0, nil, false, xerrors.Errorf("seeking to data segment index start offset: %w", err) + } + + idata, err := parseDataSegmentIndex(reader) if err != nil { - return false, xerrors.Errorf("adding index to DB (interrupted %t): %w", interrupted, err) + return 0, nil, false, xerrors.Errorf("parsing data segment index: %w", err) + } + if len(idata.Entries) == 0 { + return 0, nil, false, xerrors.New("no data segment index entries") } - log.Infof("Indexing deal %s took %0.3f seconds", task.UUID, time.Since(startTime).Seconds()) + valid := validateSegments(idata.Entries) + if len(valid) == 0 { + return 0, nil, false, xerrors.New("no valid data segment index entries") + } - err = i.recordCompletion(ctx, task, taskID, true) - if err != nil { - return false, err + aggidx := make(map[cid.Cid][]indexstore.Record) + + log.Infow("Indexing aggregate", "piece_size", size, "num_chunks", len(valid), "num_sub_pieces", len(subPieces)) + + if len(subPieces) > 1 { + if len(valid) != len(subPieces) { + return 0, nil, false, xerrors.Errorf("expected %d data segment index entries, got %d", len(subPieces), len(idata.Entries)) + } + } else { + return 0, nil, false, xerrors.Errorf("expected at least 2 sub pieces, got 0") } - blocksPerSecond := float64(blocks) / time.Since(start).Seconds() - log.Infow("Piece indexed", "piece_cid", task.PieceCid, "uuid", task.UUID, "sp_id", task.SpID, "sector", task.Sector, "blocks", blocks, "blocks_per_second", blocksPerSecond) + var totalBlocks int64 + for j, entry := range valid { + bufferSize := 4 << 20 + if entry.Size < uint64(bufferSize) { + bufferSize = int(entry.Size) + } + strt := entry.UnpaddedOffest() + leng := entry.UnpaddedLength() + sectionReader := io.NewSectionReader(reader, int64(strt), int64(leng)) + sp := subPieces[j] - return true, nil + if sp.Format.Car != nil { + b, inter, err := IndexCAR(sectionReader, bufferSize, recs, addFail) + if err != nil { + //// Allow one more layer of aggregation to be indexed + //if strings.Contains(err.Error(), "invalid car version") { + // if haveSubPieces { + // if subPieces[j].Car != nil { + // return 0, aggidx, false, xerrors.Errorf("invalid car version for subPiece %d: %w", j, err) + // } + // if subPieces[j].Raw != nil { + // continue + // } + // if subPieces[j].Aggregate != nil { + // b, idx, inter, err = IndexAggregate(commp.PCidV2(), sectionReader, abi.PaddedPieceSize(entry.Size), nil, recs, addFail) + // if err != nil { + // return totalBlocks, aggidx, inter, xerrors.Errorf("invalid aggregate for subPiece %d: %w", j, err) + // } + // totalBlocks += b + // for k, v := range idx { + // aggidx[k] = append(aggidx[k], v...) + // } + // } + // } else { + // continue + // } + //} + return totalBlocks, aggidx, false, xerrors.Errorf("indexing subPiece %d: %w", j, err) + } + + if inter { + return totalBlocks, aggidx, true, nil + } + totalBlocks += b + } + + aggidx[pieceCid] = append(aggidx[pieceCid], indexstore.Record{ + Cid: sp.PieceCID, + Offset: strt, + Size: leng, + }) + } + + return totalBlocks, aggidx, false, nil } // recordCompletion add the piece metadata and piece deal to the DB and // records the completion of an indexing task in the database func (i *IndexingTask) recordCompletion(ctx context.Context, task itask, taskID harmonytask.TaskID, indexed bool) error { - _, err := i.db.Exec(ctx, `SELECT process_piece_deal($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)`, - task.UUID, task.PieceCid, !task.IsDDO, task.SpID, task.Sector, task.Offset, task.Size, task.RawSize, indexed, false, task.ChainDealId) - if err != nil { - return xerrors.Errorf("failed to update piece metadata and piece deal for deal %s: %w", task.UUID, err) + if task.Mk20 { + _, err := i.db.Exec(ctx, `SELECT process_piece_deal($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, + task.UUID, task.PieceCid, !task.IsDDO, task.SpID, task.Sector, task.Offset, task.Size, task.RawSize, indexed, task.PieceRef, false, task.ChainDealId) + if err != nil { + return xerrors.Errorf("failed to update piece metadata and piece deal for deal %s: %w", task.UUID, err) + } + } else { + _, err := i.db.Exec(ctx, `SELECT process_piece_deal($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, + task.UUID, task.PieceCid, !task.IsDDO, task.SpID, task.Sector, task.Offset, task.Size, task.RawSize, indexed, nil, false, task.ChainDealId) + if err != nil { + return xerrors.Errorf("failed to update piece metadata and piece deal for deal %s: %w", task.UUID, err) + } } // If IPNI is disabled then mark deal as complete otherwise just mark as indexed if i.cfg.Market.StorageMarketConfig.IPNI.Disable { - n, err := i.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET indexed = TRUE, indexing_task_id = NULL, + if task.Mk20 { + n, err := i.db.Exec(ctx, `UPDATE market_mk20_pipeline SET indexed = TRUE, indexing_task_id = NULL, + complete = TRUE WHERE id = $1 AND indexing_task_id = $2`, task.UUID, taskID) + if err != nil { + return xerrors.Errorf("store indexing success: updating pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("store indexing success: updated %d rows", n) + } + } else { + n, err := i.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET indexed = TRUE, indexing_task_id = NULL, complete = TRUE WHERE uuid = $1 AND indexing_task_id = $2`, task.UUID, taskID) - if err != nil { - return xerrors.Errorf("store indexing success: updating pipeline: %w", err) - } - if n != 1 { - return xerrors.Errorf("store indexing success: updated %d rows", n) + if err != nil { + return xerrors.Errorf("store indexing success: updating pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("store indexing success: updated %d rows", n) + } } } else { - n, err := i.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET indexed = TRUE, indexing_task_id = NULL + if task.Mk20 { + n, err := i.db.Exec(ctx, `UPDATE market_mk20_pipeline SET indexed = TRUE, indexing_task_id = NULL + WHERE id = $1 AND indexing_task_id = $2`, task.UUID, taskID) + if err != nil { + return xerrors.Errorf("store indexing success: updating pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("store indexing success: updated %d rows", n) + } + } else { + n, err := i.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET indexed = TRUE, indexing_task_id = NULL WHERE uuid = $1 AND indexing_task_id = $2`, task.UUID, taskID) - if err != nil { - return xerrors.Errorf("store indexing success: updating pipeline: %w", err) - } - if n != 1 { - return xerrors.Errorf("store indexing success: updated %d rows", n) + if err != nil { + return xerrors.Errorf("store indexing success: updating pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("store indexing success: updated %d rows", n) + } } } @@ -259,45 +637,88 @@ func (i *IndexingTask) recordCompletion(ctx context.Context, task itask, taskID func (i *IndexingTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { ctx := context.Background() + type task struct { + TaskID harmonytask.TaskID `db:"indexing_task_id"` + SpID int64 `db:"sp_id"` + SectorNumber int64 `db:"sector"` + StorageID string `db:"storage_id"` + Url string `db:"url"` + Indexing bool `db:"indexing"` + } + + var tasks []*task + indIDs := make([]int64, len(ids)) for x, id := range ids { indIDs[x] = int64(id) } - // Accept any task which should not be indexed as - // it does not require storage access - var id int64 - err := i.db.QueryRow(ctx, `SELECT indexing_task_id - FROM market_mk12_deal_pipeline - WHERE should_index = FALSE AND - indexing_task_id = ANY ($1) ORDER BY indexing_task_id LIMIT 1`, indIDs).Scan(&id) - if err == nil { - ret := harmonytask.TaskID(id) - return &ret, nil - } else if !errors.Is(err, pgx.ErrNoRows) { - return nil, xerrors.Errorf("getting pending indexing task: %w", err) + var mk20tasks []*task + if storiface.FTPiece != 32 { + panic("storiface.FTPiece != 32") } - var tasks []struct { - TaskID harmonytask.TaskID `db:"indexing_task_id"` - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector"` - StorageID string `db:"storage_id"` + err := i.db.Select(ctx, &mk20tasks, `SELECT indexing_task_id, url, indexing FROM market_mk20_pipeline WHERE indexing_task_id = ANY($1)`, indIDs) + if err != nil { + return nil, xerrors.Errorf("getting mk20 urls: %w", err) + } + + for _, t := range mk20tasks { + + if !t.Indexing { + continue + } + + goUrl, err := url.Parse(t.Url) + if err != nil { + return nil, xerrors.Errorf("parsing data URL: %w", err) + } + if goUrl.Scheme == "pieceref" { + refNum, err := strconv.ParseInt(goUrl.Opaque, 10, 64) + if err != nil { + return nil, xerrors.Errorf("parsing piece reference number: %w", err) + } + + // get pieceID + var pieceID []struct { + PieceID storiface.PieceNumber `db:"piece_id"` + } + err = i.db.Select(ctx, &pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, refNum) + if err != nil { + return nil, xerrors.Errorf("getting pieceID: %w", err) + } + + var sLocation string + + err = i.db.QueryRow(ctx, ` + SELECT storage_id FROM sector_location + WHERE miner_id = 0 AND sector_num = $1 AND sector_filetype = 32`, pieceID[0].PieceID).Scan(&sLocation) + + if err != nil { + return nil, xerrors.Errorf("failed to get storage location from DB: %w", err) + } + + t.StorageID = sLocation + + } } if storiface.FTUnsealed != 1 { panic("storiface.FTUnsealed != 1") } - err = i.db.Select(ctx, &tasks, ` - SELECT dp.indexing_task_id, dp.sp_id, dp.sector, l.storage_id FROM market_mk12_deal_pipeline dp - INNER JOIN sector_location l ON dp.sp_id = l.miner_id AND dp.sector = l.sector_num - WHERE dp.indexing_task_id = ANY ($1) AND l.sector_filetype = 1 -`, indIDs) + var mk12tasks []*task + + err = i.db.Select(ctx, &mk12tasks, `SELECT dp.indexing_task_id, dp.should_index AS indexing, dp.sp_id, dp.sector, l.storage_id + FROM market_mk12_deal_pipeline dp + INNER JOIN sector_location l ON dp.sp_id = l.miner_id AND dp.sector = l.sector_num + WHERE dp.indexing_task_id = ANY ($1) AND l.sector_filetype = 1`, indIDs) if err != nil { - return nil, xerrors.Errorf("getting tasks: %w", err) + return nil, xerrors.Errorf("getting mk12 tasks: %w", err) } + tasks = append(mk20tasks, mk12tasks...) + ls, err := i.sc.LocalStorage(ctx) if err != nil { return nil, xerrors.Errorf("getting local storage: %w", err) @@ -309,6 +730,9 @@ func (i *IndexingTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.T } for _, t := range tasks { + if !t.Indexing { + return &t.TaskID, nil + } if found, ok := localStorageMap[t.StorageID]; ok && found { return &t.TaskID, nil } @@ -342,35 +766,60 @@ func (i *IndexingTask) schedule(ctx context.Context, taskFunc harmonytask.AddTas taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { stop = true // assume we're done until we find a task to schedule - var pendings []struct { + var mk12Pendings []struct { UUID string `db:"uuid"` } // Indexing job must be created for every deal to make sure piece details are inserted in DB // even if we don't want to index it. If piece is not supposed to be indexed then it will handled // by the Do() - err := i.db.Select(ctx, &pendings, `SELECT uuid FROM market_mk12_deal_pipeline + err := tx.Select(&mk12Pendings, `SELECT uuid FROM market_mk12_deal_pipeline WHERE sealed = TRUE AND indexing_task_id IS NULL AND indexed = FALSE ORDER BY indexing_created_at ASC LIMIT 1;`) if err != nil { - return false, xerrors.Errorf("getting pending indexing tasks: %w", err) + return false, xerrors.Errorf("getting pending mk12 indexing tasks: %w", err) } - if len(pendings) == 0 { - return false, nil + if len(mk12Pendings) > 0 { + pending := mk12Pendings[0] + + _, err = tx.Exec(`UPDATE market_mk12_deal_pipeline SET indexing_task_id = $1 + WHERE indexing_task_id IS NULL AND uuid = $2`, id, pending.UUID) + if err != nil { + return false, xerrors.Errorf("updating mk12 indexing task id: %w", err) + } + + stop = false // we found a task to schedule, keep going + return true, nil } - pending := pendings[0] + var mk20Pendings []struct { + UUID string `db:"id"` + } - _, err = tx.Exec(`UPDATE market_mk12_deal_pipeline SET indexing_task_id = $1 - WHERE indexing_task_id IS NULL AND uuid = $2`, id, pending.UUID) + err = tx.Select(&mk20Pendings, `SELECT id FROM market_mk20_pipeline + WHERE sealed = TRUE + AND indexing_task_id IS NULL + AND indexed = FALSE + ORDER BY indexing_created_at ASC LIMIT 1;`) + if err != nil { + return false, xerrors.Errorf("getting mk20 pending indexing tasks: %w", err) + } + + if len(mk20Pendings) == 0 { + return false, nil + } + + pending := mk20Pendings[0] + _, err = tx.Exec(`UPDATE market_mk20_pipeline SET indexing_task_id = $1 + WHERE indexing_task_id IS NULL AND id = $2`, id, pending.UUID) if err != nil { - return false, xerrors.Errorf("updating indexing task id: %w", err) + return false, xerrors.Errorf("updating mk20 indexing task id: %w", err) } - stop = false // we found a task to schedule, keep going + stop = false return true, nil }) } @@ -383,7 +832,9 @@ func (i *IndexingTask) Adder(taskFunc harmonytask.AddTaskFunc) { func (i *IndexingTask) GetSpid(db *harmonydb.DB, taskID int64) string { var spid string - err := db.QueryRow(context.Background(), `SELECT sp_id FROM market_mk12_deal_pipeline WHERE indexing_task_id = $1`, taskID).Scan(&spid) + err := db.QueryRow(context.Background(), `SELECT sp_id FROM market_mk12_deal_pipeline WHERE indexing_task_id = $1 + UNION ALL + SELECT sp_id FROM market_mk20_pipeline WHERE indexing_task_id = $1`, taskID).Scan(&spid) if err != nil { log.Errorf("getting spid: %s", err) return "" diff --git a/tasks/indexing/task_ipni.go b/tasks/indexing/task_ipni.go index 30878edea..845d29327 100644 --- a/tasks/indexing/task_ipni.go +++ b/tasks/indexing/task_ipni.go @@ -1,27 +1,28 @@ package indexing import ( - "bufio" "bytes" "context" "crypto/rand" + "database/sql" "errors" "fmt" - "io" "net/url" "strings" "time" + "github.com/google/uuid" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" - carv2 "github.com/ipld/go-car/v2" cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/ipni/go-libipni/ingest/schema" "github.com/ipni/go-libipni/maurl" "github.com/ipni/go-libipni/metadata" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" + "github.com/oklog/ulid" "github.com/yugabyte/pgx/v5" + "golang.org/x/sync/errgroup" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" @@ -32,6 +33,8 @@ import ( "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/cachedreader" + "github.com/filecoin-project/curio/lib/commcidv2" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/passcall" "github.com/filecoin-project/curio/lib/pieceprovider" @@ -39,24 +42,25 @@ import ( "github.com/filecoin-project/curio/market/indexstore" "github.com/filecoin-project/curio/market/ipni/chunker" "github.com/filecoin-project/curio/market/ipni/ipniculib" + "github.com/filecoin-project/curio/market/mk20" ) var ilog = logging.Logger("ipni") type IPNITask struct { db *harmonydb.DB - indexStore *indexstore.IndexStore pieceProvider *pieceprovider.SectorReader + cpr *cachedreader.CachedPieceReader sc *ffi.SealCalls cfg *config.CurioConfig max taskhelp.Limiter } -func NewIPNITask(db *harmonydb.DB, sc *ffi.SealCalls, indexStore *indexstore.IndexStore, pieceProvider *pieceprovider.SectorReader, cfg *config.CurioConfig, max taskhelp.Limiter) *IPNITask { +func NewIPNITask(db *harmonydb.DB, sc *ffi.SealCalls, pieceProvider *pieceprovider.SectorReader, cpr *cachedreader.CachedPieceReader, cfg *config.CurioConfig, max taskhelp.Limiter) *IPNITask { return &IPNITask{ db: db, - indexStore: indexStore, pieceProvider: pieceProvider, + cpr: cpr, sc: sc, cfg: cfg, max: max, @@ -68,6 +72,7 @@ func (I *IPNITask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done b var tasks []struct { SPID int64 `db:"sp_id"` + ID sql.NullString `db:"id"` Sector abi.SectorNumber `db:"sector"` Proof abi.RegisteredSealProof `db:"reg_seal_proof"` Offset int64 `db:"sector_offset"` @@ -78,7 +83,8 @@ func (I *IPNITask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done b } err = I.db.Select(ctx, &tasks, `SELECT - sp_id, + sp_id, + id, sector, reg_seal_proof, sector_offset, @@ -105,12 +111,159 @@ func (I *IPNITask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done b return true, nil } + if task.Rm { + comm, err := I.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var ads []struct { + ContextID []byte `db:"context_id"` + IsRm bool `db:"is_rm"` + Previous string `db:"previous"` + Provider string `db:"provider"` + Addresses string `db:"addresses"` + Entries string `db:"entries"` + Metadata []byte `db:"metadata"` + Pcid2 string `db:"piece_cid_v2"` + Pcid1 string `db:"piece_cid"` + Size int64 `db:"piece_size"` + } + + // Get the latest Ad + err = tx.Select(&ads, `SELECT + context_id, + is_rm, + previous, + provider, + addresses, + entries, + metadata, + piece_cid_v2, + piece_cid, + piece_size + FROM ipni + WHERE context_id = $1 + AND provider = $2 + ORDER BY order_number DESC + LIMIT 1`, task.CtxID, task.Prov) + + if err != nil { + return false, xerrors.Errorf("getting ad from DB: %w", err) + } + + if len(ads) == 0 { + return false, xerrors.Errorf("not original ad found for removal ad") + } + + if len(ads) > 1 { + return false, xerrors.Errorf("expected 1 ad but got %d", len(ads)) + } + + a := ads[0] + + e, err := cid.Parse(a.Entries) + if err != nil { + return false, xerrors.Errorf("parsing entry CID: %w", err) + } + + var prev string + + err = tx.QueryRow(`SELECT head FROM ipni_head WHERE provider = $1`, task.Prov).Scan(&prev) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return false, xerrors.Errorf("querying previous head: %w", err) + } + + prevCID, err := cid.Parse(prev) + if err != nil { + return false, xerrors.Errorf("parsing previous CID: %w", err) + } + + var privKey []byte + err = tx.QueryRow(`SELECT priv_key FROM ipni_peerid WHERE sp_id = $1`, task.SPID).Scan(&privKey) + if err != nil { + return false, xerrors.Errorf("failed to get private ipni-libp2p key for PDP: %w", err) + } + + pkey, err := crypto.UnmarshalPrivateKey(privKey) + if err != nil { + return false, xerrors.Errorf("unmarshaling private key: %w", err) + } + + adv := schema.Advertisement{ + PreviousID: cidlink.Link{Cid: prevCID}, + Provider: a.Provider, + Addresses: strings.Split(a.Addresses, "|"), + Entries: cidlink.Link{Cid: e}, + ContextID: a.ContextID, + IsRm: true, + Metadata: a.Metadata, + } + + err = adv.Sign(pkey) + if err != nil { + return false, xerrors.Errorf("signing the advertisement: %w", err) + } + + err = adv.Validate() + if err != nil { + return false, xerrors.Errorf("validating the advertisement: %w", err) + } + + adNode, err := adv.ToNode() + if err != nil { + return false, xerrors.Errorf("converting advertisement to node: %w", err) + } + + ad, err := ipniculib.NodeToLink(adNode, schema.Linkproto) + if err != nil { + return false, xerrors.Errorf("converting advertisement to link: %w", err) + } + + _, err = tx.Exec(`SELECT insert_ad_and_update_head($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)`, + ad.(cidlink.Link).Cid.String(), adv.ContextID, a.Metadata, a.Pcid2, a.Pcid1, a.Size, adv.IsRm, adv.Provider, strings.Join(adv.Addresses, "|"), + adv.Signature, adv.Entries.String()) + + if err != nil { + return false, xerrors.Errorf("adding advertisement to the database: %w", err) + } + + n, err := tx.Exec(`UPDATE ipni_task SET complete = true WHERE task_id = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to mark IPNI task complete: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updated %d rows", n) + } + + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("store IPNI success: %w", err) + } + + if !comm { + return false, xerrors.Errorf("store IPNI success: failed to commit the transaction") + } + + log.Infow("IPNI task complete", "task_id", taskID) + return true, nil + } + var pi abi.PieceInfo err = pi.UnmarshalCBOR(bytes.NewReader(task.CtxID)) if err != nil { return false, xerrors.Errorf("unmarshaling piece info: %w", err) } + var rawSize abi.UnpaddedPieceSize + err = I.db.QueryRow(ctx, `SELECT raw_size FROM market_piece_deal WHERE piece_cid = $1 AND piece_length = $2 LIMIT 1`, pi.PieceCID.String(), pi.Size).Scan(&rawSize) + if err != nil { + return false, xerrors.Errorf("querying raw size: %w", err) + } + + pcid2, err := commcidv2.PieceCidV2FromV1(pi.PieceCID, uint64(rawSize)) + if err != nil { + return false, xerrors.Errorf("getting piece CID v2: %w", err) + } + + // Try to read unsealed sector first (mk12 deal) reader, err := I.pieceProvider.ReadPiece(ctx, storiface.SectorRef{ ID: abi.SectorID{ Miner: abi.ActorID(task.SPID), @@ -119,27 +272,109 @@ func (I *IPNITask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done b ProofType: task.Proof, }, storiface.PaddedByteIndex(task.Offset).Unpadded(), pi.Size.Unpadded(), pi.PieceCID) if err != nil { - return false, xerrors.Errorf("getting piece reader: %w", err) + serr := err + // Try to read piece (mk20 deal) + reader, _, err = I.cpr.GetSharedPieceReader(ctx, pcid2) + if err != nil { + return false, xerrors.Errorf("getting piece reader from sector and piece park: %w, %w", serr, err) + } } - opts := []carv2.Option{carv2.ZeroLengthSectionAsEOF(true)} - blockReader, err := carv2.NewBlockReader(bufio.NewReaderSize(reader, 4<<20), opts...) - if err != nil { - return false, fmt.Errorf("getting block reader over piece: %w", err) + defer func() { + _ = reader.Close() + }() + + var isMK20 bool + + if task.ID.Valid { + _, err := ulid.Parse(task.ID.String) + if err == nil { + isMK20 = true + } else { + _, err := uuid.Parse(task.ID.String) + if err != nil { + return false, xerrors.Errorf("parsing task id: %w", err) + } + } } + recs := make(chan indexstore.Record, 1) + + var eg errgroup.Group + addFail := make(chan struct{}) + var interrupted bool + var subPieces []mk20.DataSource chk := chunker.NewInitialChunker() - blockMetadata, err := blockReader.SkipNext() - for err == nil { - if err := chk.Accept(blockMetadata.Hash(), int64(blockMetadata.Offset), blockMetadata.Size+40); err != nil { - return false, xerrors.Errorf("accepting block: %w", err) + eg.Go(func() error { + defer close(addFail) + for rec := range recs { + serr := chk.Accept(rec.Cid.Hash(), int64(rec.Offset), rec.Size) + if serr != nil { + addFail <- struct{}{} + return serr + } + } + return nil + }) + + if isMK20 { + id, serr := ulid.Parse(task.ID.String) + if serr != nil { + return false, xerrors.Errorf("parsing task id: %w", serr) + } + deal, serr := mk20.DealFromDB(ctx, I.db, id) + if serr != nil { + return false, xerrors.Errorf("getting deal from db: %w", serr) + } + + if deal.Data.Format.Raw != nil { + return false, xerrors.Errorf("raw data not supported") + } + + if deal.Data.Format.Car != nil { + _, interrupted, err = IndexCAR(reader, 4<<20, recs, addFail) + } + + if deal.Data.Format.Aggregate != nil { + if deal.Data.Format.Aggregate.Type > 0 { + var found bool + if len(deal.Data.Format.Aggregate.Sub) > 0 { + subPieces = deal.Data.Format.Aggregate.Sub + found = true + } + if len(deal.Data.SourceAggregate.Pieces) > 0 { + subPieces = deal.Data.SourceAggregate.Pieces + found = true + } + if !found { + return false, xerrors.Errorf("no sub pieces for aggregate mk20 deal") + } + _, _, interrupted, err = IndexAggregate(pcid2, reader, pi.Size, subPieces, recs, addFail) + } else { + return false, xerrors.Errorf("invalid aggregate type") + } } - blockMetadata, err = blockReader.SkipNext() + } else { + _, interrupted, err = IndexCAR(reader, 4<<20, recs, addFail) } - if !errors.Is(err, io.EOF) { - return false, xerrors.Errorf("reading block: %w", err) + + if err != nil { + // Chunking itself failed, stop early + close(recs) // still safe to close, chk.Accept() will exit on channel close + // wait for chk.Accept() goroutine to finish cleanly + _ = eg.Wait() + return false, xerrors.Errorf("chunking failed: %w", err) + } + + // Close the channel + close(recs) + + // Wait till is finished + err = eg.Wait() + if err != nil { + return false, xerrors.Errorf("adding index to chunk (interrupted %t): %w", interrupted, err) } // make sure we still own the task before writing to the database @@ -147,7 +382,7 @@ func (I *IPNITask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done b return false, nil } - lnk, err := chk.Finish(ctx, I.db, pi.PieceCID) + lnk, err := chk.Finish(ctx, I.db, pcid2) if err != nil { return false, xerrors.Errorf("chunking CAR multihash iterator: %w", err) } @@ -160,7 +395,7 @@ func (I *IPNITask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done b _, err = I.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { var prev string err = tx.QueryRow(`SELECT head FROM ipni_head WHERE provider = $1`, task.Prov).Scan(&prev) - if err != nil && err != pgx.ErrNoRows { + if err != nil && !errors.Is(err, pgx.ErrNoRows) { return false, xerrors.Errorf("querying previous head: %w", err) } @@ -239,8 +474,8 @@ func (I *IPNITask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done b return false, xerrors.Errorf("converting advertisement to link: %w", err) } - _, err = tx.Exec(`SELECT insert_ad_and_update_head($1, $2, $3, $4, $5, $6, $7, $8, $9)`, - ad.(cidlink.Link).Cid.String(), adv.ContextID, pi.PieceCID.String(), pi.Size, adv.IsRm, adv.Provider, strings.Join(adv.Addresses, "|"), + _, err = tx.Exec(`SELECT insert_ad_and_update_head($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)`, + ad.(cidlink.Link).Cid.String(), adv.ContextID, md, pcid2.String(), pi.PieceCID.String(), pi.Size, adv.IsRm, adv.Provider, strings.Join(adv.Addresses, "|"), adv.Signature, adv.Entries.String()) if err != nil { @@ -268,57 +503,7 @@ func (I *IPNITask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done b } func (I *IPNITask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - var tasks []struct { - TaskID harmonytask.TaskID `db:"task_id"` - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector"` - StorageID string `db:"storage_id"` - } - - if storiface.FTUnsealed != 1 { - panic("storiface.FTUnsealed != 1") - } - - ctx := context.Background() - - indIDs := make([]int64, len(ids)) - for i, id := range ids { - indIDs[i] = int64(id) - } - - err := I.db.Select(ctx, &tasks, ` - SELECT dp.task_id, dp.sp_id, dp.sector, l.storage_id FROM ipni_task dp - INNER JOIN sector_location l ON dp.sp_id = l.miner_id AND dp.sector = l.sector_num - WHERE dp.task_id = ANY ($1) AND l.sector_filetype = 1 -`, indIDs) - if err != nil { - return nil, xerrors.Errorf("getting tasks: %w", err) - } - - ls, err := I.sc.LocalStorage(ctx) - if err != nil { - return nil, xerrors.Errorf("getting local storage: %w", err) - } - - acceptables := map[harmonytask.TaskID]bool{} - - for _, t := range ids { - acceptables[t] = true - } - - for _, t := range tasks { - if _, ok := acceptables[t.TaskID]; !ok { - continue - } - - for _, l := range ls { - if string(l.ID) == t.StorageID { - return &t.TaskID, nil - } - } - } - - return nil, nil + return &ids[0], nil } func (I *IPNITask) TypeDetails() harmonytask.TaskTypeDetails { @@ -348,6 +533,7 @@ func (I *IPNITask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFun var stop bool for !stop { var markComplete *string + var mk20 bool taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { stop = true // assume we're done until we find a task to schedule @@ -355,20 +541,43 @@ func (I *IPNITask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFun var pendings []itask err := tx.Select(&pendings, `SELECT - uuid, - sp_id, - sector, - piece_cid, - piece_size, - sector_offset, - reg_seal_proof, - raw_size, - should_index, - announce - FROM market_mk12_deal_pipeline + uuid, + sp_id, + sector, + piece_cid, + piece_size, + sector_offset, + reg_seal_proof, + raw_size, + should_index, + announce, + indexing_created_at, + FALSE as mk20 + FROM market_mk12_deal_pipeline WHERE sealed = TRUE - AND indexed = TRUE - AND complete = FALSE + AND indexed = TRUE + AND complete = FALSE + + UNION ALL + + SELECT + id AS uuid, + sp_id, + sector, + piece_cid, + piece_size, + sector_offset, + reg_seal_proof, + raw_size, + indexing AS should_index, + announce, + indexing_created_at, + TRUE as mk20 + FROM market_mk20_pipeline + WHERE sealed = TRUE + AND indexed = TRUE + AND complete = FALSE + ORDER BY indexing_created_at ASC LIMIT 1;`) if err != nil { @@ -384,9 +593,14 @@ func (I *IPNITask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFun // Skip IPNI if deal says not to announce or not to index (fast retrievals). If we announce without // indexing, it will cause issue with retrievals. if !p.Announce || !p.ShouldIndex { - n, err := tx.Exec(`UPDATE market_mk12_deal_pipeline SET complete = TRUE WHERE uuid = $1`, p.UUID) + var n int + if p.Mk20 { + n, err = tx.Exec(`UPDATE market_mk20_pipeline SET complete = TRUE WHERE id = $1`, p.UUID) + } else { + n, err = tx.Exec(`UPDATE market_mk12_deal_pipeline SET complete = TRUE WHERE uuid = $1`, p.UUID) + } if err != nil { - return false, xerrors.Errorf("store IPNI success: updating pipeline (1): %w", err) + return false, xerrors.Errorf("store IPNI success: updating pipeline: %w", err) } if n != 1 { return false, xerrors.Errorf("store IPNI success: updated %d rows", n) @@ -454,20 +668,22 @@ func (I *IPNITask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFun return false, xerrors.Errorf("marshaling piece info: %w", err) } - _, err = tx.Exec(`SELECT insert_ipni_task($1, $2, $3, $4, $5, $6, $7, $8)`, p.SpID, + _, err = tx.Exec(`SELECT insert_ipni_task($1, $2, $3, $4, $5, $6, $7, $8, $9)`, p.UUID, p.SpID, p.Sector, p.Proof, p.Offset, b.Bytes(), false, pid.String(), id) if err != nil { if harmonydb.IsErrUniqueContraint(err) { ilog.Infof("Another IPNI announce task already present for piece %s in deal %s", p.PieceCid, p.UUID) // SET "complete" status to true for this deal, so it is not considered next time markComplete = &p.UUID + mk20 = p.Mk20 stop = false // we found a sector to work on, keep going - return true, nil + return false, nil } if strings.Contains(err.Error(), "already published") { ilog.Infof("Piece %s in deal %s is already published", p.PieceCid, p.UUID) // SET "complete" status to true for this deal, so it is not considered next time markComplete = &p.UUID + mk20 = p.Mk20 stop = false // we found a sector to work on, keep going return false, nil } @@ -479,7 +695,13 @@ func (I *IPNITask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFun }) if markComplete != nil { - n, err := I.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET complete = TRUE WHERE uuid = $1 AND complete = FALSE`, *markComplete) + var n int + var err error + if mk20 { + n, err = I.db.Exec(ctx, `UPDATE market_mk20_pipeline SET complete = TRUE WHERE id = $1 AND complete = FALSE`, *markComplete) + } else { + n, err = I.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET complete = TRUE WHERE uuid = $1 AND complete = FALSE`, *markComplete) + } if err != nil { log.Errorf("store IPNI success: updating pipeline (2): %s", err) } diff --git a/tasks/indexing/task_pdp_indexing.go b/tasks/indexing/task_pdp_indexing.go new file mode 100644 index 000000000..bb909bae0 --- /dev/null +++ b/tasks/indexing/task_pdp_indexing.go @@ -0,0 +1,374 @@ +package indexing + +import ( + "context" + "errors" + "time" + + "github.com/ipfs/go-cid" + "github.com/oklog/ulid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/cachedreader" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/market/indexstore" + "github.com/filecoin-project/curio/market/mk20" +) + +type PDPIndexingTask struct { + db *harmonydb.DB + indexStore *indexstore.IndexStore + cpr *cachedreader.CachedPieceReader + sc *ffi.SealCalls + cfg *config.CurioConfig + insertConcurrency int + insertBatchSize int + max taskhelp.Limiter +} + +func NewPDPIndexingTask(db *harmonydb.DB, sc *ffi.SealCalls, indexStore *indexstore.IndexStore, cpr *cachedreader.CachedPieceReader, cfg *config.CurioConfig, max taskhelp.Limiter) *PDPIndexingTask { + + return &PDPIndexingTask{ + db: db, + indexStore: indexStore, + cpr: cpr, + sc: sc, + cfg: cfg, + insertConcurrency: cfg.Market.StorageMarketConfig.Indexing.InsertConcurrency, + insertBatchSize: cfg.Market.StorageMarketConfig.Indexing.InsertBatchSize, + max: max, + } +} + +func (P *PDPIndexingTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var tasks []struct { + ID string `db:"id"` + PieceCIDV2 string `db:"piece_cid_v2"` + PieceRef int64 `db:"piece_ref"` + Indexing bool `db:"indexing"` + } + + err = P.db.Select(ctx, &tasks, `SELECT id, piece_cid_v2, piece_ref, indexing FROM pdp_pipeline WHERE indexing_task_id = $1 AND indexed = FALSE`, taskID) + if err != nil { + return false, xerrors.Errorf("getting PDP pending indexing tasks: %w", err) + } + + if len(tasks) != 1 { + return false, xerrors.Errorf("incorrect rows for pending indexing tasks: %d", len(tasks)) + } + + task := tasks[0] + + pcid2, err := cid.Parse(task.PieceCIDV2) + if err != nil { + return false, xerrors.Errorf("parsing piece CID: %w", err) + } + + pi, err := mk20.GetPieceInfo(pcid2) + if err != nil { + return false, xerrors.Errorf("getting piece info: %w", err) + } + + var indexed bool + err = P.db.QueryRow(ctx, `SELECT indexed FROM market_piece_metadata WHERE piece_cid = $1 and piece_size = $2`, pi.PieceCIDV1.String(), pi.Size).Scan(&indexed) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return false, xerrors.Errorf("checking if piece %s is already indexed: %w", task.PieceCIDV2, err) + } + + id, err := ulid.Parse(task.ID) + if err != nil { + return false, xerrors.Errorf("parsing task id: %w", err) + } + + deal, err := mk20.DealFromDB(ctx, P.db, id) + if err != nil { + return false, xerrors.Errorf("getting deal from db: %w", err) + } + + var subPieces []mk20.DataSource + var byteData bool + + if deal.Data.Format.Aggregate != nil { + if deal.Data.Format.Aggregate.Type > 0 { + var found bool + if len(deal.Data.Format.Aggregate.Sub) > 0 { + subPieces = deal.Data.Format.Aggregate.Sub + found = true + } + if len(deal.Data.SourceAggregate.Pieces) > 0 { + subPieces = deal.Data.SourceAggregate.Pieces + found = true + } + if !found { + return false, xerrors.Errorf("no sub pieces for aggregate PDP deal") + } + } + } + + if deal.Data.Format.Raw != nil { + byteData = true + } + + if indexed || !task.Indexing || byteData { + err = P.recordCompletion(ctx, taskID, task.ID, pi.PieceCIDV1.String(), int64(pi.Size), int64(pi.RawSize), task.PieceRef, false) + if err != nil { + return false, err + } + log.Infow("Piece already indexed or should not be indexed", "piece_cid", task.PieceCIDV2, "indexed", indexed, "should_index", task.Indexing, "id", task.ID, "sp_id") + + return true, nil + } + + reader, _, err := P.cpr.GetSharedPieceReader(ctx, pcid2) + + if err != nil { + return false, xerrors.Errorf("getting piece reader: %w", err) + } + + defer func() { + _ = reader.Close() + }() + + startTime := time.Now() + + dealCfg := P.cfg.Market.StorageMarketConfig + chanSize := dealCfg.Indexing.InsertConcurrency * dealCfg.Indexing.InsertBatchSize + + recs := make(chan indexstore.Record, chanSize) + var blocks int64 + + var eg errgroup.Group + addFail := make(chan struct{}) + var interrupted bool + + eg.Go(func() error { + defer close(addFail) + return P.indexStore.AddIndex(ctx, pcid2, recs) + }) + + var aggidx map[cid.Cid][]indexstore.Record + + if len(subPieces) > 0 { + blocks, aggidx, interrupted, err = IndexAggregate(pcid2, reader, pi.Size, subPieces, recs, addFail) + } else { + blocks, interrupted, err = IndexCAR(reader, 4<<20, recs, addFail) + } + + if err != nil { + // Indexing itself failed, stop early + close(recs) // still safe to close, AddIndex will exit on channel close + // wait for AddIndex goroutine to finish cleanly + _ = eg.Wait() + return false, xerrors.Errorf("indexing failed: %w", err) + } + + // Close the channel + close(recs) + + // Wait till AddIndex is finished + err = eg.Wait() + if err != nil { + return false, xerrors.Errorf("adding index to DB (interrupted %t): %w", interrupted, err) + } + + log.Infof("Indexing deal %s took %0.3f seconds", task.ID, time.Since(startTime).Seconds()) + + // Save aggregate index if present + for k, v := range aggidx { + if len(v) > 0 { + err = P.indexStore.InsertAggregateIndex(ctx, k, v) + if err != nil { + return false, xerrors.Errorf("inserting aggregate index: %w", err) + } + } + } + + err = P.recordCompletion(ctx, taskID, task.ID, pi.PieceCIDV1.String(), int64(pi.Size), int64(pi.RawSize), task.PieceRef, true) + if err != nil { + return false, err + } + + blocksPerSecond := float64(blocks) / time.Since(startTime).Seconds() + log.Infow("Piece indexed", "piece_cid", task.PieceCIDV2, "id", task.ID, "blocks", blocks, "blocks_per_second", blocksPerSecond) + + return true, nil +} + +func (P *PDPIndexingTask) recordCompletion(ctx context.Context, taskID harmonytask.TaskID, id, PieceCID string, size, rawSize, pieceRef int64, indexed bool) error { + comm, err := P.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + _, err = tx.Exec(`SELECT process_piece_deal($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, + id, PieceCID, false, -1, -1, nil, size, rawSize, indexed, pieceRef, false, 0) + if err != nil { + return false, xerrors.Errorf("failed to update piece metadata and piece deal for deal %s: %w", id, err) + } + + if P.cfg.Market.StorageMarketConfig.IPNI.Disable { + n, err := P.db.Exec(ctx, `UPDATE pdp_pipeline SET indexed = TRUE, indexing_task_id = NULL, + complete = TRUE WHERE id = $1 AND indexing_task_id = $2`, id, taskID) + if err != nil { + return false, xerrors.Errorf("store indexing success: updating pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("store indexing success: updated %d rows", n) + } + } else { + n, err := tx.Exec(`UPDATE pdp_pipeline SET indexed = TRUE, indexing_task_id = NULL + WHERE id = $1 AND indexing_task_id = $2`, id, taskID) + if err != nil { + return false, xerrors.Errorf("store indexing success: updating pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("store indexing success: updated %d rows", n) + } + } + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return xerrors.Errorf("committing transaction: %w", err) + } + if !comm { + return xerrors.Errorf("failed to commit transaction") + } + + return nil +} + +func (P *PDPIndexingTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + ctx := context.Background() + + type task struct { + TaskID harmonytask.TaskID `db:"indexing_task_id"` + StorageID string `db:"storage_id"` + PieceRef int64 `db:"piece_ref"` + Indexing bool `db:"indexing"` + } + + indIDs := make([]int64, len(ids)) + for x, id := range ids { + indIDs[x] = int64(id) + } + + var tasks []*task + if storiface.FTPiece != 32 { + panic("storiface.FTPiece != 32") + } + + err := P.db.Select(ctx, &tasks, `SELECT indexing_task_id, piece_ref, indexing FROM pdp_pipeline WHERE indexing_task_id = ANY($1)`, indIDs) + if err != nil { + return nil, xerrors.Errorf("getting PDP indexing details: %w", err) + } + + for _, t := range tasks { + + if !t.Indexing { + continue + } + + var sLocation string + err = P.db.QueryRow(ctx, ` + SELECT sl.storage_id + FROM parked_piece_refs ppr + JOIN sector_location sl + ON sl.sector_num = ppr.piece_id + AND sl.miner_id = 0 + AND sl.sector_filetype = 32 + WHERE ppr.ref_id = $1 + `, t.PieceRef).Scan(&sLocation) + if err != nil { + return nil, xerrors.Errorf("getting storage_id: %w", err) + } + + t.StorageID = sLocation + } + + ls, err := P.sc.LocalStorage(ctx) + if err != nil { + return nil, xerrors.Errorf("getting local storage: %w", err) + } + + localStorageMap := make(map[string]bool, len(ls)) + for _, l := range ls { + localStorageMap[string(l.ID)] = true + } + + for _, t := range tasks { + if !t.Indexing { + return &t.TaskID, nil + } + if found, ok := localStorageMap[t.StorageID]; ok && found { + return &t.TaskID, nil + } + } + + return nil, nil +} + +func (P *PDPIndexingTask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Name: "PDPIndexing", + Cost: resources.Resources{ + Cpu: 1, + Ram: uint64(P.insertBatchSize * P.insertConcurrency * 56 * 2), + }, + Max: P.max, + MaxFailures: 3, + IAmBored: passcall.Every(3*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return P.schedule(context.Background(), taskFunc) + }), + } +} + +func (P *PDPIndexingTask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + // schedule submits + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var pendings []struct { + ID string `db:"id"` + } + + err := tx.Select(&pendings, `SELECT id FROM pdp_pipeline + WHERE after_save_cache = TRUE + AND indexing_task_id IS NULL + AND indexed = FALSE + ORDER BY indexing_created_at ASC LIMIT 1;`) + if err != nil { + return false, xerrors.Errorf("getting PDP pending indexing tasks: %w", err) + } + + if len(pendings) == 0 { + return false, nil + } + + pending := pendings[0] + _, err = tx.Exec(`UPDATE pdp_pipeline SET indexing_task_id = $1 + WHERE indexing_task_id IS NULL AND id = $2`, id, pending.ID) + if err != nil { + return false, xerrors.Errorf("updating PDP indexing task id: %w", err) + } + + stop = false + return true, nil + }) + } + + return nil +} + +func (P *PDPIndexingTask) Adder(taskFunc harmonytask.AddTaskFunc) {} + +var _ harmonytask.TaskInterface = &PDPIndexingTask{} +var _ = harmonytask.Reg(&PDPIndexingTask{}) diff --git a/tasks/indexing/task_pdp_ipni.go b/tasks/indexing/task_pdp_ipni.go new file mode 100644 index 000000000..9f4bcc74c --- /dev/null +++ b/tasks/indexing/task_pdp_ipni.go @@ -0,0 +1,755 @@ +package indexing + +import ( + "context" + "crypto/rand" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipni/go-libipni/ingest/schema" + "github.com/ipni/go-libipni/maurl" + "github.com/ipni/go-libipni/metadata" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/oklog/ulid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/build" + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/cachedreader" + "github.com/filecoin-project/curio/lib/commcidv2" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/market/indexstore" + "github.com/filecoin-project/curio/market/ipni/chunker" + "github.com/filecoin-project/curio/market/ipni/ipniculib" + "github.com/filecoin-project/curio/market/ipni/types" + "github.com/filecoin-project/curio/market/mk20" +) + +type PDPIPNITask struct { + db *harmonydb.DB + cpr *cachedreader.CachedPieceReader + sc *ffi.SealCalls + cfg *config.CurioConfig + max taskhelp.Limiter +} + +func NewPDPIPNITask(db *harmonydb.DB, sc *ffi.SealCalls, cpr *cachedreader.CachedPieceReader, cfg *config.CurioConfig, max taskhelp.Limiter) *PDPIPNITask { + return &PDPIPNITask{ + db: db, + cpr: cpr, + sc: sc, + cfg: cfg, + max: max, + } +} + +func (P *PDPIPNITask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var tasks []struct { + ID string `db:"id"` + CtxID []byte `db:"context_id"` + Rm bool `db:"is_rm"` + Prov string `db:"provider"` + Complete bool `db:"complete"` + } + + err = P.db.Select(ctx, &tasks, `SELECT + id, + context_id, + is_rm, + provider, + complete + FROM + pdp_ipni_task + WHERE + task_id = $1;`, taskID) + if err != nil { + return false, xerrors.Errorf("getting ipni task params: %w", err) + } + + if len(tasks) != 1 { + return false, xerrors.Errorf("expected 1 ipni task params, got %d", len(tasks)) + } + + task := tasks[0] + + if task.Complete { + log.Infow("IPNI task already complete", "task_id", taskID) + return true, nil + } + + if task.Rm { + comm, err := P.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var ads []struct { + ContextID []byte `db:"context_id"` + IsRm bool `db:"is_rm"` + Previous string `db:"previous"` + Provider string `db:"provider"` + Addresses string `db:"addresses"` + Entries string `db:"entries"` + Metadata []byte `db:"metadata"` + Pcid2 string `db:"piece_cid_v2"` + Pcid1 string `db:"piece_cid"` + Size int64 `db:"piece_size"` + } + + // Get the latest Ad + err = tx.Select(&ads, `SELECT + context_id, + is_rm, + previous, + provider, + addresses, + entries, + metadata, + piece_cid_v2, + piece_cid, + piece_size + FROM ipni + WHERE context_id = $1 + AND provider = $2 + ORDER BY order_number DESC + LIMIT 1`, task.CtxID, task.Prov) + + if err != nil { + return false, xerrors.Errorf("getting ad from DB: %w", err) + } + + if len(ads) == 0 { + return false, xerrors.Errorf("not original ad found for removal ad") + } + + if len(ads) > 1 { + return false, xerrors.Errorf("expected 1 ad but got %d", len(ads)) + } + + a := ads[0] + + e, err := cid.Parse(a.Entries) + if err != nil { + return false, xerrors.Errorf("parsing entry CID: %w", err) + } + + var prev string + + err = tx.QueryRow(`SELECT head FROM ipni_head WHERE provider = $1`, task.Prov).Scan(&prev) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return false, xerrors.Errorf("querying previous head: %w", err) + } + + prevCID, err := cid.Parse(prev) + if err != nil { + return false, xerrors.Errorf("parsing previous CID: %w", err) + } + + var privKey []byte + err = tx.QueryRow(`SELECT priv_key FROM ipni_peerid WHERE sp_id = $1`, -1).Scan(&privKey) + if err != nil { + return false, xerrors.Errorf("failed to get private ipni-libp2p key for PDP: %w", err) + } + + pkey, err := crypto.UnmarshalPrivateKey(privKey) + if err != nil { + return false, xerrors.Errorf("unmarshaling private key: %w", err) + } + + adv := schema.Advertisement{ + PreviousID: cidlink.Link{Cid: prevCID}, + Provider: a.Provider, + Addresses: strings.Split(a.Addresses, "|"), + Entries: cidlink.Link{Cid: e}, + ContextID: a.ContextID, + IsRm: true, + Metadata: a.Metadata, + } + + err = adv.Sign(pkey) + if err != nil { + return false, xerrors.Errorf("signing the advertisement: %w", err) + } + + err = adv.Validate() + if err != nil { + return false, xerrors.Errorf("validating the advertisement: %w", err) + } + + adNode, err := adv.ToNode() + if err != nil { + return false, xerrors.Errorf("converting advertisement to node: %w", err) + } + + ad, err := ipniculib.NodeToLink(adNode, schema.Linkproto) + if err != nil { + return false, xerrors.Errorf("converting advertisement to link: %w", err) + } + + _, err = tx.Exec(`SELECT insert_ad_and_update_head($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)`, + ad.(cidlink.Link).Cid.String(), adv.ContextID, a.Metadata, a.Pcid2, a.Pcid1, a.Size, adv.IsRm, adv.Provider, strings.Join(adv.Addresses, "|"), + adv.Signature, adv.Entries.String()) + + if err != nil { + return false, xerrors.Errorf("adding advertisement to the database: %w", err) + } + + n, err := tx.Exec(`UPDATE pdp_ipni_task SET complete = true WHERE task_id = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to mark IPNI task complete: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updated %d rows", n) + } + + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("store IPNI success: %w", err) + } + + if !comm { + return false, xerrors.Errorf("store IPNI success: failed to commit the transaction") + } + + log.Infow("IPNI task complete", "task_id", taskID) + return true, nil + } + + pinfo := &types.PdpIpniContext{} + err = pinfo.Unmarshal(task.CtxID) + if err != nil { + return false, xerrors.Errorf("unmarshaling piece info: %w", err) + } + + pcid2 := pinfo.PieceCID + + pi, err := commcidv2.CommPFromPCidV2(pcid2) + if err != nil { + return false, xerrors.Errorf("getting piece info from piece cid: %w", err) + } + + var lnk ipld.Link + + if pinfo.Payload { + reader, _, err := P.cpr.GetSharedPieceReader(ctx, pcid2) + if err != nil { + return false, xerrors.Errorf("getting piece reader from piece park: %w", err) + } + + defer func() { + _ = reader.Close() + }() + + recs := make(chan indexstore.Record, 1) + + var eg errgroup.Group + addFail := make(chan struct{}) + var interrupted bool + var subPieces []mk20.DataSource + chk := chunker.NewInitialChunker() + + eg.Go(func() error { + defer close(addFail) + for rec := range recs { + serr := chk.Accept(rec.Cid.Hash(), int64(rec.Offset), rec.Size) + if serr != nil { + addFail <- struct{}{} + return serr + } + } + return nil + }) + + id, serr := ulid.Parse(task.ID) + if serr != nil { + return false, xerrors.Errorf("parsing task id: %w", serr) + } + deal, serr := mk20.DealFromDB(ctx, P.db, id) + if serr != nil { + return false, xerrors.Errorf("getting deal from db: %w", serr) + } + + if deal.Data.Format.Raw != nil { + return false, xerrors.Errorf("raw data not supported") + } + + if deal.Data.Format.Car != nil { + _, interrupted, err = IndexCAR(reader, 4<<20, recs, addFail) + } + + if deal.Data.Format.Aggregate != nil { + if deal.Data.Format.Aggregate.Type > 0 { + var found bool + if len(deal.Data.Format.Aggregate.Sub) > 0 { + subPieces = deal.Data.Format.Aggregate.Sub + found = true + } + if len(deal.Data.SourceAggregate.Pieces) > 0 { + subPieces = deal.Data.SourceAggregate.Pieces + found = true + } + if !found { + return false, xerrors.Errorf("no sub pieces for aggregate mk20 deal") + } + _, _, interrupted, err = IndexAggregate(pcid2, reader, pi.PieceInfo().Size, subPieces, recs, addFail) + } else { + return false, xerrors.Errorf("invalid aggregate type") + } + } + + if err != nil { + // Chunking itself failed, stop early + close(recs) // still safe to close, chk.Accept() will exit on channel close + // wait for chk.Accept() goroutine to finish cleanly + _ = eg.Wait() + return false, xerrors.Errorf("chunking failed: %w", err) + } + + // Close the channel + close(recs) + + // Wait till is finished + err = eg.Wait() + if err != nil { + return false, xerrors.Errorf("adding index to chunk (interrupted %t): %w", interrupted, err) + } + + // make sure we still own the task before writing to the database + if !stillOwned() { + return false, nil + } + + lnk, err = chk.Finish(ctx, P.db, pcid2) + if err != nil { + return false, xerrors.Errorf("chunking CAR multihash iterator: %w", err) + } + } else { + chk := chunker.NewInitialChunker() + err = chk.Accept(pcid2.Hash(), 0, uint64(pi.PieceInfo().Size)) + if err != nil { + return false, xerrors.Errorf("adding index to chunk: %w", err) + } + lnk, err = chk.Finish(ctx, P.db, pcid2) + if err != nil { + return false, xerrors.Errorf("chunking CAR multihash iterator: %w", err) + } + } + + // make sure we still own the task before writing ad chains + if !stillOwned() { + return false, nil + } + + _, err = P.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var prev string + err = tx.QueryRow(`SELECT head FROM ipni_head WHERE provider = $1`, task.Prov).Scan(&prev) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return false, xerrors.Errorf("querying previous head: %w", err) + } + + var md []byte + if pinfo.Payload { + mds := metadata.IpfsGatewayHttp{} + mdb, err := mds.MarshalBinary() + if err != nil { + return false, xerrors.Errorf("marshaling metadata: %w", err) + } + md = mdb + } else { + mds := metadata.FilecoinPieceHttp{} + mdb, err := mds.MarshalBinary() + if err != nil { + return false, xerrors.Errorf("marshaling metadata: %w", err) + } + md = mdb + } + + var privKey []byte + err = tx.QueryRow(`SELECT priv_key FROM ipni_peerid WHERE sp_id = $1`, -1).Scan(&privKey) + if err != nil { + return false, xerrors.Errorf("failed to get private ipni-libp2p key for PDP: %w", err) + } + + pkey, err := crypto.UnmarshalPrivateKey(privKey) + if err != nil { + return false, xerrors.Errorf("unmarshaling private key: %w", err) + } + + adv := schema.Advertisement{ + Provider: task.Prov, + Entries: lnk, + ContextID: task.CtxID, + Metadata: md, + IsRm: task.Rm, + } + + { + u, err := url.Parse(fmt.Sprintf("https://%s", P.cfg.HTTP.DomainName)) + if err != nil { + return false, xerrors.Errorf("parsing announce address domain: %w", err) + } + if build.BuildType != build.BuildMainnet && build.BuildType != build.BuildCalibnet { + ls := strings.Split(P.cfg.HTTP.ListenAddress, ":") + u, err = url.Parse(fmt.Sprintf("http://%s:%s", P.cfg.HTTP.DomainName, ls[1])) + if err != nil { + return false, xerrors.Errorf("parsing announce address domain: %w", err) + } + } + + addr, err := maurl.FromURL(u) + if err != nil { + return false, xerrors.Errorf("converting URL to multiaddr: %w", err) + } + + adv.Addresses = append(adv.Addresses, addr.String()) + } + + if prev != "" { + prevCID, err := cid.Parse(prev) + if err != nil { + return false, xerrors.Errorf("parsing previous CID: %w", err) + } + + adv.PreviousID = cidlink.Link{Cid: prevCID} + } + + err = adv.Sign(pkey) + if err != nil { + return false, xerrors.Errorf("signing the advertisement: %w", err) + } + + err = adv.Validate() + if err != nil { + return false, xerrors.Errorf("validating the advertisement: %w", err) + } + + adNode, err := adv.ToNode() + if err != nil { + return false, xerrors.Errorf("converting advertisement to node: %w", err) + } + + ad, err := ipniculib.NodeToLink(adNode, schema.Linkproto) + if err != nil { + return false, xerrors.Errorf("converting advertisement to link: %w", err) + } + + _, err = tx.Exec(`SELECT insert_ad_and_update_head($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)`, + ad.(cidlink.Link).Cid.String(), adv.ContextID, md, pcid2.String(), pi.PieceInfo().PieceCID.String(), pi.PieceInfo().Size, adv.IsRm, adv.Provider, strings.Join(adv.Addresses, "|"), + adv.Signature, adv.Entries.String()) + + if err != nil { + return false, xerrors.Errorf("adding advertisement to the database: %w", err) + } + + n, err := tx.Exec(`UPDATE pdp_ipni_task SET complete = true WHERE task_id = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to mark IPNI task complete: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("updated %d rows", n) + } + + return true, nil + + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("store IPNI success: %w", err) + } + + log.Infow("IPNI task complete", "task_id", taskID) + + return true, nil +} + +func (P *PDPIPNITask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + return &ids[0], nil +} + +func (P *PDPIPNITask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Name: "PDPIpni", + Cost: resources.Resources{ + Cpu: 1, + Ram: 1 << 30, + }, + MaxFailures: 3, + IAmBored: passcall.Every(30*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return P.schedule(context.Background(), taskFunc) + }), + Max: P.max, + } +} + +func (P *PDPIPNITask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + if P.cfg.Market.StorageMarketConfig.IPNI.Disable { + return nil + } + + // schedule submits + var stop bool + for !stop { + var markComplete, markCompletePayload, complete *string + + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var pendings []struct { + ID string `db:"id"` + PieceCid string `db:"piece_cid_v2"` + Announce bool `db:"announce"` + AnnouncePayload bool `db:"announce_payload"` + Announced bool `db:"announced"` + AnnouncedPayload bool `db:"announced_payload"` + } + + err := tx.Select(&pendings, `SELECT + id, + piece_cid_v2, + announce, + announce_payload, + announced, + announced_payload + FROM pdp_pipeline + WHERE indexed = TRUE + AND complete = FALSE + LIMIT 1;`) + if err != nil { + return false, xerrors.Errorf("getting pending IPNI announcing tasks: %w", err) + } + + if len(pendings) == 0 { + return false, nil + } + + p := pendings[0] + + // Mark deal is complete if: + // 1. We don't need to announce anything + // 2. Both type of announcements are done + if !(p.Announce && p.AnnouncePayload) || (p.Announced && p.AnnouncedPayload) { //nolint:staticcheck + complete = &p.ID + return false, nil + } + + var privKey []byte + var peerIDStr string + err = tx.QueryRow(`SELECT priv_key, peer_id FROM ipni_peerid WHERE sp_id = $1`, -1).Scan(&privKey, &peerIDStr) + if err != nil { + if !errors.Is(err, pgx.ErrNoRows) { + return false, xerrors.Errorf("failed to get private libp2p key for PDP: %w", err) + } + + //var pkey []byte + + // TODO: Connect to PDP owner key. Might not be the best approach as keys seem incompatible. + //err = tx.QueryRow(`SELECT private_key FROM eth_keys WHERE role = 'pdp'`).Scan(&pkey) + //if err != nil { + // return false, xerrors.Errorf("failed to get private eth key for PDP: %w", err) + //} + + // generate the ipni provider key + pk, _, err := crypto.GenerateEd25519Key(rand.Reader) + if err != nil { + return false, xerrors.Errorf("failed to generate a new key: %w", err) + } + + privKey, err = crypto.MarshalPrivateKey(pk) + if err != nil { + return false, xerrors.Errorf("failed to marshal the private key: %w", err) + } + + //pk, err := crypto.UnmarshalPrivateKey(pkey) + //if err != nil { + // return false, xerrors.Errorf("unmarshaling private key: %w", err) + //} + + pid, err := peer.IDFromPublicKey(pk.GetPublic()) + if err != nil { + return false, xerrors.Errorf("getting peer ID: %w", err) + } + + n, err := tx.Exec(`INSERT INTO ipni_peerid (sp_id, priv_key, peer_id) VALUES ($1, $2, $3) ON CONFLICT(sp_id) DO NOTHING `, -1, privKey, pid.String()) + if err != nil { + return false, xerrors.Errorf("failed to to insert the key into DB: %w", err) + } + + if n == 0 { + return false, xerrors.Errorf("failed to insert the key into db") + } + + peerIDStr = pid.String() + } + + pid, err := peer.Decode(peerIDStr) + if err != nil { + return false, fmt.Errorf("decoding peer ID: %w", err) + } + + pcid, err := cid.Parse(p.PieceCid) + if err != nil { + return false, xerrors.Errorf("parsing piece CID: %w", err) + } + + // If we need to announce payload and haven't done so, then do it first + if p.AnnouncePayload && !p.AnnouncedPayload { + pi := &types.PdpIpniContext{ + PieceCID: pcid, + Payload: true, + } + + iContext, err := pi.Marshal() + if err != nil { + return false, xerrors.Errorf("marshaling piece info: %w", err) + } + + _, err = tx.Exec(`SELECT insert_pdp_ipni_task($1, $2, $3, $4, $5)`, iContext, false, p.ID, pid.String(), id) + if err != nil { + if harmonydb.IsErrUniqueContraint(err) { + ilog.Infof("Another IPNI announce task already present for piece %s and payload %d in deal %s", p.PieceCid, p.AnnouncePayload, p.ID) + stop = false // we found a sector to work on, keep going + markCompletePayload = &p.ID + return false, nil + } + if strings.Contains(err.Error(), "already published") { + ilog.Infof("Piece %s in deal %s is already published", p.PieceCid, p.ID) + stop = false // we found a sector to work on, keep going + markCompletePayload = &p.ID + return false, nil + } + return false, xerrors.Errorf("updating IPNI announcing task id: %w", err) + } + stop = false + markCompletePayload = &p.ID + // Return early while commiting so we mark complete for payload announcement + return true, nil + } + + // If we don't need to announce payload, mark it as complete so pipeline does not try that + if !p.AnnouncePayload && !p.AnnouncedPayload { + stop = false + markCompletePayload = &p.ID + // Rerun early without commiting so we mark complete for payload announcement + return false, nil + } + + // If we need to announce piece and haven't done so then do it + if p.Announce && !p.Announced { + pi := &types.PdpIpniContext{ + PieceCID: pcid, + Payload: false, + } + + iContext, err := pi.Marshal() + if err != nil { + return false, xerrors.Errorf("marshaling piece info: %w", err) + } + + _, err = tx.Exec(`SELECT insert_pdp_ipni_task($1, $2, $3, $4, $5)`, iContext, false, p.ID, pid.String(), id) + if err != nil { + if harmonydb.IsErrUniqueContraint(err) { + ilog.Infof("Another IPNI announce task already present for piece %s and payload %d in deal %s", p.PieceCid, p.AnnouncePayload, p.ID) + stop = false // we found a sector to work on, keep going + markComplete = &p.ID + return false, nil + + } + if strings.Contains(err.Error(), "already published") { + ilog.Infof("Piece %s in deal %s is already published", p.PieceCid, p.ID) + stop = false // we found a sector to work on, keep going + markComplete = &p.ID + return false, nil + + } + return false, xerrors.Errorf("updating IPNI announcing task id: %w", err) + } + stop = false + markComplete = &p.ID + // Return early while commiting so we mark complete for piece announcement + return true, nil + } + + // If we don't need to announce piece, mark it as complete so pipeline does not try that + if !p.Announce && !p.Announced { + stop = false + markComplete = &p.ID + // Rerun early without commiting so we mark complete for payload announcement + return false, nil + } + + return false, xerrors.Errorf("no task to schedule") + }) + + if markComplete != nil { + n, err := P.db.Exec(ctx, `UPDATE pdp_pipeline SET announced = TRUE WHERE id = $1`, *markComplete) + if err != nil { + log.Errorf("store IPNI success: updating pipeline: %w", err) + } + if n != 1 { + log.Errorf("store IPNI success: updated %d rows", n) + } + } + + if markCompletePayload != nil { + n, err := P.db.Exec(ctx, `UPDATE pdp_pipeline SET announced_payload = TRUE WHERE id = $1`, *markCompletePayload) + if err != nil { + log.Errorf("store IPNI success: updating pipeline: %w", err) + } + if n != 1 { + log.Errorf("store IPNI success: updated %d rows", n) + } + } + + if complete != nil { + comm, err := P.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE pdp_pipeline SET complete = TRUE WHERE id = $1`, *complete) + + if err != nil { + return false, xerrors.Errorf("updating pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected to update 1 row but updated %d rows", n) + } + + n, err = tx.Exec(`UPDATE market_mk20_deal + SET pdp_v1 = jsonb_set(pdp_v1, '{complete}', 'true'::jsonb, true) + WHERE id = $1;`, *complete) + if err != nil { + return false, xerrors.Errorf("failed to update market_mk20_deal: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + + stop = false // we found a task to schedule, keep going + ilog.Debugf("Deal %s is marked as complete", *complete) + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return xerrors.Errorf("marking deal as complete: %w", err) + } + if !comm { + return xerrors.Errorf("marking deal as complete: failed to commit transaction") + } + } + } + + return nil +} + +func (P *PDPIPNITask) Adder(taskFunc harmonytask.AddTaskFunc) {} + +var _ harmonytask.TaskInterface = &PDPIPNITask{} +var _ = harmonytask.Reg(&PDPIPNITask{}) diff --git a/tasks/pdp/data_set_create_watch.go b/tasks/pdp/data_set_create_watch.go new file mode 100644 index 000000000..9b13e7eb0 --- /dev/null +++ b/tasks/pdp/data_set_create_watch.go @@ -0,0 +1,222 @@ +package pdp + +import ( + "context" + "encoding/json" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/chainsched" + "github.com/filecoin-project/curio/pdp/contract" + + chainTypes "github.com/filecoin-project/lotus/chain/types" +) + +type DataSetCreate struct { + CreateMessageHash string `db:"tx_hash"` + ID string `db:"id"` + Client string `db:"client"` +} + +func NewWatcherDataSetCreate(db *harmonydb.DB, ethClient *ethclient.Client, pcs *chainsched.CurioChainSched) { + if err := pcs.AddHandler(func(ctx context.Context, revert, apply *chainTypes.TipSet) error { + err := processPendingDataSetCreates(ctx, db, ethClient) + if err != nil { + log.Errorf("Failed to process pending data set creates: %s", err) + } + return nil + }); err != nil { + panic(err) + } +} + +func processPendingDataSetCreates(ctx context.Context, db *harmonydb.DB, ethClient *ethclient.Client) error { + // Query for pdp_data_set_create entries tx_hash is NOT NULL + var dataSetCreates []DataSetCreate + + err := db.Select(ctx, &dataSetCreates, ` + SELECT id, client, tx_hash + FROM pdp_data_set_create + WHERE tx_hash IS NOT NULL`) + if err != nil { + return xerrors.Errorf("failed to select data set creates: %w", err) + } + + if len(dataSetCreates) == 0 { + // No pending data set creates + return nil + } + + // Process each data set create + for _, dsc := range dataSetCreates { + err := processDataSetCreate(ctx, db, dsc, ethClient) + if err != nil { + log.Errorf("Failed to process data set create for tx %s: %s", dsc.CreateMessageHash, err) + continue + } + } + + return nil +} + +func processDataSetCreate(ctx context.Context, db *harmonydb.DB, dsc DataSetCreate, ethClient *ethclient.Client) error { + // Retrieve the tx_receipt from message_waits_eth + var txReceiptJSON []byte + var txSuccess bool + err := db.QueryRow(ctx, `SELECT tx_receipt, tx_success FROM message_waits_eth + WHERE signed_tx_hash = $1 + AND tx_success IS NOT NULL + AND tx_receipt IS NOT NULL`, dsc.CreateMessageHash).Scan(&txReceiptJSON, &txSuccess) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return xerrors.Errorf("tx hash %s is either missing from watch table or is not yet processed by watcher", dsc.CreateMessageHash) + } + return xerrors.Errorf("failed to get tx_receipt for tx %s: %w", dsc.CreateMessageHash, err) + } + + // Unmarshal the tx_receipt JSON into types.Receipt + var txReceipt types.Receipt + err = json.Unmarshal(txReceiptJSON, &txReceipt) + if err != nil { + return xerrors.Errorf("failed to unmarshal tx_receipt for tx %s: %w", dsc.CreateMessageHash, err) + } + + // Exit early if transaction executed with failure + if !txSuccess { + // This means msg failed, we should let the user know + // TODO: Review if error would be in receipt + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE market_mk20_deal + SET pdp_v1 = jsonb_set( + jsonb_set(pdp_v1, '{error}', to_jsonb($1::text), true), + '{complete}', to_jsonb(true), true + ) + WHERE id = $2;`, "Transaction failed", dsc.ID) + if err != nil { + return false, xerrors.Errorf("failed to update market_mk20_deal: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + _, err = tx.Exec(`DELETE FROM pdp_data_set_create WHERE id = $1`, dsc.ID) + if err != nil { + return false, xerrors.Errorf("failed to delete pdp_data_set_create: %w", err) + } + return true, nil + }) + if err != nil { + return xerrors.Errorf("failed to commit transaction: %w", err) + } + if !comm { + return xerrors.Errorf("failed to commit transaction") + } + return nil + } + + // Parse the logs to extract the dataSetId + dataSetId, err := extractDataSetIdFromReceipt(&txReceipt) + if err != nil { + return xerrors.Errorf("failed to extract dataSetId from receipt for tx %s: %w", dsc.CreateMessageHash, err) + } + + // Get the listener address for this data set from the PDPVerifier contract + pdpVerifier, err := contract.NewPDPVerifier(contract.ContractAddresses().PDPVerifier, ethClient) + if err != nil { + return xerrors.Errorf("failed to instantiate PDPVerifier contract: %w", err) + } + + listenerAddr, err := pdpVerifier.GetDataSetListener(nil, big.NewInt(int64(dataSetId))) + if err != nil { + return xerrors.Errorf("failed to get listener address for data set %d: %w", dataSetId, err) + } + + // Get the proving period from the listener + // Assumption: listener is a PDP Service with proving window informational methods + provingPeriod, challengeWindow, err := getProvingPeriodChallengeWindow(ctx, ethClient, listenerAddr) + if err != nil { + return xerrors.Errorf("failed to get max proving period: %w", err) + } + + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`INSERT INTO pdp_data_set (id, client, proving_period, challenge_window, create_deal_id, create_message_hash) + VALUES ($1, $2, $3, $4, $5, $6)`, dataSetId, dsc.Client, provingPeriod, challengeWindow, dsc.ID, dsc.CreateMessageHash) + if err != nil { + return false, xerrors.Errorf("failed to insert pdp_data_set_create: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be inserted, got %d", n) + } + + n, err = tx.Exec(`UPDATE market_mk20_deal + SET pdp_v1 = jsonb_set(pdp_v1, '{complete}', 'true'::jsonb, true) + WHERE id = $1;`, dsc.ID) + if err != nil { + return false, xerrors.Errorf("failed to update market_mk20_deal: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + + _, err = tx.Exec(`DELETE FROM pdp_data_set_create WHERE id = $1`, dsc.ID) + if err != nil { + return false, xerrors.Errorf("failed to delete pdp_data_set_create: %w", err) + } + return true, nil + }) + if err != nil { + return xerrors.Errorf("failed to commit transaction: %w", err) + } + if !comm { + return xerrors.Errorf("failed to commit transaction") + } + + return nil +} + +func extractDataSetIdFromReceipt(receipt *types.Receipt) (uint64, error) { + pdpABI, err := contract.PDPVerifierMetaData.GetAbi() + if err != nil { + return 0, xerrors.Errorf("failed to get PDP ABI: %w", err) + } + + event, exists := pdpABI.Events["DataSetCreated"] + if !exists { + return 0, xerrors.Errorf("DataSetCreated event not found in ABI") + } + + for _, vLog := range receipt.Logs { + if len(vLog.Topics) > 0 && vLog.Topics[0] == event.ID { + if len(vLog.Topics) < 2 { + return 0, xerrors.Errorf("log does not contain setId topic") + } + + setIdBigInt := new(big.Int).SetBytes(vLog.Topics[1].Bytes()) + return setIdBigInt.Uint64(), nil + } + } + + return 0, xerrors.Errorf("DataSetCreated event not found in receipt") +} + +func getProvingPeriodChallengeWindow(ctx context.Context, ethClient *ethclient.Client, listenerAddr common.Address) (uint64, uint64, error) { + // Get the proving schedule from the listener (handles view contract indirection) + schedule, err := contract.GetProvingScheduleFromListener(listenerAddr, ethClient) + if err != nil { + return 0, 0, xerrors.Errorf("failed to get proving schedule from listener: %w", err) + } + + config, err := schedule.GetPDPConfig(&bind.CallOpts{Context: ctx}) + if err != nil { + return 0, 0, xerrors.Errorf("failed to get pdp config: %w", err) + } + + return config.MaxProvingPeriod, config.ChallengeWindow.Uint64(), nil +} diff --git a/tasks/pdp/data_set_delete_watch.go b/tasks/pdp/data_set_delete_watch.go new file mode 100644 index 000000000..c3efa7d36 --- /dev/null +++ b/tasks/pdp/data_set_delete_watch.go @@ -0,0 +1,165 @@ +package pdp + +import ( + "context" + "encoding/json" + "errors" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/chainsched" + + chainTypes "github.com/filecoin-project/lotus/chain/types" +) + +type DataSetDelete struct { + DeleteMessageHash string `db:"tx_hash"` + ID string `db:"id"` + PID int64 `db:"set_id"` +} + +func NewWatcherDelete(db *harmonydb.DB, pcs *chainsched.CurioChainSched) { + if err := pcs.AddHandler(func(ctx context.Context, revert, apply *chainTypes.TipSet) error { + err := processPendingDataSetDeletes(ctx, db) + if err != nil { + log.Errorf("Failed to process pending data set creates: %s", err) + } + return nil + }); err != nil { + panic(err) + } +} + +func processPendingDataSetDeletes(ctx context.Context, db *harmonydb.DB) error { + // Query for pdp_data_set_delete where txHash is not NULL + var dataSetDeletes []DataSetDelete + + err := db.Select(ctx, &dataSetDeletes, ` + SELECT id, set_id, tx_hash + FROM pdp_data_set_delete + WHERE tx_hash IS NOT NULL`) + if err != nil { + return xerrors.Errorf("failed to select data set deletes: %w", err) + } + + if len(dataSetDeletes) == 0 { + // No pending data set creates + return nil + } + + // Process each data set delete + for _, psd := range dataSetDeletes { + err := processDataSetDelete(ctx, db, psd) + if err != nil { + log.Errorf("Failed to process data set delete for tx %s: %s", psd.DeleteMessageHash, err) + continue + } + } + + return nil +} + +func processDataSetDelete(ctx context.Context, db *harmonydb.DB, psd DataSetDelete) error { + // Retrieve the tx_receipt from message_waits_eth + var txReceiptJSON []byte + var txSuccess bool + err := db.QueryRow(ctx, `SELECT tx_receipt, tx_success FROM message_waits_eth WHERE signed_tx_hash = $1 + AND tx_success IS NOT NULL + AND tx_receipt IS NOT NULL`, psd.DeleteMessageHash).Scan(&txReceiptJSON, &txSuccess) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return xerrors.Errorf("tx hash %s is either missing from watch table or is not yet processed by watcher", psd.DeleteMessageHash) + } + return xerrors.Errorf("failed to get tx_receipt for tx %s: %w", psd.DeleteMessageHash, err) + } + + // Unmarshal the tx_receipt JSON into types.Receipt + var txReceipt types.Receipt + err = json.Unmarshal(txReceiptJSON, &txReceipt) + if err != nil { + return xerrors.Errorf("failed to unmarshal tx_receipt for tx %s: %w", psd.DeleteMessageHash, err) + } + + // Exit early if transaction executed with failure + if !txSuccess { + // This means msg failed, we should let the user know + // TODO: Review if error would be in receipt + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE market_mk20_deal + SET pdp_v1 = jsonb_set( + jsonb_set(pdp_v1, '{error}', to_jsonb($1::text), true), + '{complete}', to_jsonb(true), true + ) + WHERE id = $2;`, "Transaction failed", psd.ID) + if err != nil { + return false, xerrors.Errorf("failed to update market_mk20_deal: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + _, err = tx.Exec(`DELETE FROM pdp_data_set_delete WHERE id = $1`, psd.ID) + if err != nil { + return false, xerrors.Errorf("failed to delete row from pdp_data_set_delete: %w", err) + } + return true, nil + }) + if err != nil { + return xerrors.Errorf("failed to commit transaction: %w", err) + } + if !comm { + return xerrors.Errorf("failed to commit transaction") + } + return nil + } + + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + + n, err := tx.Exec(`UPDATE pdp_data_set SET removed = TRUE, + remove_deal_id = $1, + remove_message_hash = $2 + WHERE id = $3`, psd.ID, psd.DeleteMessageHash, psd.PID) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_data_set: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + + _, err = tx.Exec(`UPDATE pdp_dataset_piece SET removed = TRUE, + remove_deal_id = $1, + remove_message_hash = $2 + WHERE data_set_id = $3`, psd.ID, psd.DeleteMessageHash, psd.PID) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_dataset_piece: %w", err) + } + + _, err = tx.Exec(`DELETE FROM pdp_data_set_delete WHERE id = $1`, psd.ID) + if err != nil { + return false, xerrors.Errorf("failed to delete row from pdp_data_set_delete: %w", err) + } + + n, err = tx.Exec(`UPDATE market_mk20_deal + SET pdp_v1 = jsonb_set(pdp_v1, '{complete}', 'true'::jsonb, true) + WHERE id = $1;`, psd.ID) + if err != nil { + return false, xerrors.Errorf("failed to update market_mk20_deal: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + + return true, nil + }) + + if err != nil { + return xerrors.Errorf("failed to commit transaction: %w", err) + } + if !comm { + return xerrors.Errorf("failed to commit transaction") + } + + return nil +} diff --git a/tasks/pdp/dataset_add_piece_watch.go b/tasks/pdp/dataset_add_piece_watch.go new file mode 100644 index 000000000..7a6cb6a3a --- /dev/null +++ b/tasks/pdp/dataset_add_piece_watch.go @@ -0,0 +1,261 @@ +package pdp + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ipfs/go-cid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/chainsched" + "github.com/filecoin-project/curio/pdp/contract" + + chainTypes "github.com/filecoin-project/lotus/chain/types" +) + +// Structures to represent database records +type DataSetPieceAdd struct { + ID string `db:"id"` + Client string `db:"client"` + PieceCID2 string `db:"piece_cid_v2"` // pieceCIDV2 + DataSet uint64 `db:"data_set_id"` + PieceRef int64 `db:"piece_ref"` + AddMessageHash string `db:"add_message_hash"` + AddMessageIndex int64 `db:"add_message_index"` +} + +// NewWatcherPieceAdd sets up the watcher for data set piece additions +func NewWatcherPieceAdd(db *harmonydb.DB, pcs *chainsched.CurioChainSched, ethClient *ethclient.Client) { + if err := pcs.AddHandler(func(ctx context.Context, revert, apply *chainTypes.TipSet) error { + err := processPendingDataSetPieceAdds(ctx, db, ethClient) + if err != nil { + log.Errorf("Failed to process pending data set piece adds: %s", err) + } + + return nil + }); err != nil { + panic(err) + } +} + +// processPendingDataSetPieceAdds processes piece additions that have been confirmed on-chain +func processPendingDataSetPieceAdds(ctx context.Context, db *harmonydb.DB, ethClient *ethclient.Client) error { + // Query for pdp_dataset_piece_adds entries where add_message_ok = TRUE + var pieceAdds []DataSetPieceAdd + + err := db.Select(ctx, &pieceAdds, ` + SELECT id, client, piece_cid_v2, data_set_id, piece_ref, add_message_hash, add_message_index + FROM pdp_pipeline + WHERE after_add_piece = TRUE AND after_add_piece_msg = FALSE + `) + if err != nil { + return xerrors.Errorf("failed to select data set piece adds: %w", err) + } + + if len(pieceAdds) == 0 { + // No pending root adds + return nil + } + + // Process each piece addition + for _, pieceAdd := range pieceAdds { + err := processDataSetPieceAdd(ctx, db, pieceAdd, ethClient) + if err != nil { + log.Errorf("Failed to process piece add for tx %s: %s", pieceAdd.AddMessageHash, err) + continue + } + } + + return nil +} + +func processDataSetPieceAdd(ctx context.Context, db *harmonydb.DB, pieceAdd DataSetPieceAdd, ethClient *ethclient.Client) error { + // Retrieve the tx_receipt from message_waits_eth + var txReceiptJSON []byte + var txSuccess bool + err := db.QueryRow(ctx, `SELECT tx_success, tx_receipt FROM message_waits_eth WHERE signed_tx_hash = $1 + AND tx_success IS NOT NULL + AND tx_receipt IS NOT NULL`, pieceAdd.AddMessageHash).Scan(&txSuccess, &txReceiptJSON) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return xerrors.Errorf("tx hash %s is either missing from watch table or is not yet processed by watcher", pieceAdd.AddMessageHash) + } + return xerrors.Errorf("failed to get tx_receipt for tx %s: %w", pieceAdd.AddMessageHash, err) + } + + // Unmarshal the tx_receipt JSON into types.Receipt + var txReceipt types.Receipt + err = json.Unmarshal(txReceiptJSON, &txReceipt) + if err != nil { + return xerrors.Errorf("failed to unmarshal tx_receipt for tx %s: %w", pieceAdd.AddMessageHash, err) + } + + if !txSuccess { + // This means msg failed, we should let the user know + // TODO: Review if error would be in receipt + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE market_mk20_deal + SET pdp_v1 = jsonb_set( + jsonb_set(pdp_v1, '{error}', to_jsonb($1::text), true), + '{complete}', to_jsonb(true), true + ) + WHERE id = $2;`, "Transaction failed", pieceAdd.ID) + if err != nil { + return false, xerrors.Errorf("failed to update market_mk20_deal: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + _, err = tx.Exec(`DELETE FROM pdp_pipeline WHERE id = $1`, pieceAdd.ID) + if err != nil { + return false, xerrors.Errorf("failed to clean up pdp pipeline: %w", err) + } + return true, nil + }) + if err != nil { + return xerrors.Errorf("failed to commit transaction: %w", err) + } + if !comm { + return xerrors.Errorf("failed to commit transaction") + } + return nil + } + + // Get the ABI from the contract metadata + pdpABI, err := contract.PDPVerifierMetaData.GetAbi() + if err != nil { + return fmt.Errorf("failed to get PDP ABI: %w", err) + } + + // Get the event definition + event, exists := pdpABI.Events["PiecesAdded"] + if !exists { + return fmt.Errorf("PiecesAdded event not found in ABI") + } + + var pieceIds []uint64 + var pieceCids [][]byte + eventFound := false + + pcid2, err := cid.Parse(pieceAdd.PieceCID2) + if err != nil { + return fmt.Errorf("failed to parse piece CID: %w", err) + } + + parser, err := contract.NewPDPVerifierFilterer(contract.ContractAddresses().PDPVerifier, ethClient) + if err != nil { + return fmt.Errorf("failed to create PDPVerifierFilterer: %w", err) + } + + // Iterate over the logs in the receipt + for _, vLog := range txReceipt.Logs { + // Check if the log corresponds to the PiecesAdded event + if len(vLog.Topics) > 0 && vLog.Topics[0] == event.ID { + // The setId is an indexed parameter in Topics[1], but we don't need it here + // as we already have the dataset ID from the database + + parsed, err := parser.ParsePiecesAdded(*vLog) + if err != nil { + return fmt.Errorf("failed to parse event log: %w", err) + } + + pieceIds = make([]uint64, len(parsed.PieceIds)) + for i := range parsed.PieceIds { + pieceIds[i] = parsed.PieceIds[i].Uint64() + } + + pieceCids = make([][]byte, len(parsed.PieceCids)) + for i := range parsed.PieceCids { + pieceCids[i] = parsed.PieceCids[i].Data + } + + eventFound = true + // We found the event, so we can break the loop + break + } + } + + if !eventFound { + return fmt.Errorf("PiecesAdded event not found in receipt") + } + + pieceId := pieceIds[pieceAdd.AddMessageIndex] + pieceCid := pieceCids[pieceAdd.AddMessageIndex] + + apcid2, err := cid.Cast(pieceCid) + if err != nil { + return fmt.Errorf("failed to cast piece CID: %w", err) + } + + if !apcid2.Equals(pcid2) { + return fmt.Errorf("piece CID in event log does not match piece CID in message") + } + + // Insert into message_waits_eth and pdp_dataset_pieces + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { + // Update data set for initialization upon first add + _, err = tx.Exec(` + UPDATE pdp_data_set SET init_ready = true + WHERE id = $1 AND prev_challenge_request_epoch IS NULL AND challenge_request_msg_hash IS NULL AND prove_at_epoch IS NULL + `, pieceAdd.DataSet) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_data_set: %w", err) + } + + // Insert into pdp_dataset_piece + n, err := tx.Exec(` + INSERT INTO pdp_dataset_piece ( + data_set_id, + client, + piece_cid_v2, + piece, + piece_ref, + add_deal_id, + add_message_hash, + add_message_index + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + `, + pieceAdd.DataSet, + pieceAdd.Client, + pieceAdd.PieceCID2, + pieceId, + pieceAdd.PieceRef, + pieceAdd.ID, + pieceAdd.AddMessageHash, + pieceAdd.AddMessageIndex, + ) + if err != nil { + return false, xerrors.Errorf("failed to insert into pdp_dataset_piece: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("incorrect number of rows inserted for pdp_dataset_piece: %d", n) + } + + n, err = tx.Exec(`UPDATE pdp_pipeline SET after_add_piece_msg = TRUE WHERE id = $1`, pieceAdd.ID) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("incorrect number of rows updated for pdp_pipeline: %d", n) + } + + // Return true to commit the transaction + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return xerrors.Errorf("failed to save details to DB: %w", err) + } + + if !comm { + return xerrors.Errorf("failed to commit transaction") + } + + return nil +} diff --git a/tasks/pdp/dataset_delete_root_watch.go b/tasks/pdp/dataset_delete_root_watch.go new file mode 100644 index 000000000..b0a012278 --- /dev/null +++ b/tasks/pdp/dataset_delete_root_watch.go @@ -0,0 +1,151 @@ +package pdp + +import ( + "context" + "encoding/json" + "errors" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/chainsched" + + chainTypes "github.com/filecoin-project/lotus/chain/types" +) + +type DataSetPieceDelete struct { + ID string `db:"id"` + DataSet uint64 `db:"set_id"` + Pieces []int64 `db:"pieces"` + Hash string `db:"tx_hash"` +} + +func NewWatcherPieceDelete(db *harmonydb.DB, pcs *chainsched.CurioChainSched) { + if err := pcs.AddHandler(func(ctx context.Context, revert, apply *chainTypes.TipSet) error { + err := processPendingDataSetPieceDeletes(ctx, db) + if err != nil { + log.Errorf("Failed to process pending data set creates: %s", err) + } + return nil + }); err != nil { + panic(err) + } +} + +func processPendingDataSetPieceDeletes(ctx context.Context, db *harmonydb.DB) error { + var dataSetPieceDeletes []DataSetPieceDelete + err := db.Select(ctx, &dataSetPieceDeletes, ` + SELECT id, tx_hash, pieces, set_id FROM pdp_piece_delete WHERE tx_hash IS NOT NULL`) + if err != nil { + return xerrors.Errorf("failed to select data set piece deletes: %w", err) + } + + if len(dataSetPieceDeletes) == 0 { + return nil + } + + for _, psd := range dataSetPieceDeletes { + err := processDataSetPieceDelete(ctx, db, psd) + if err != nil { + log.Errorf("Failed to process data set piece delete for tx %s: %s", psd.Hash, err) + continue + } + } + + return nil +} + +func processDataSetPieceDelete(ctx context.Context, db *harmonydb.DB, psd DataSetPieceDelete) error { + var txReceiptJSON []byte + var txSuccess bool + err := db.QueryRow(ctx, `SELECT tx_receipt, tx_success FROM message_waits_eth WHERE signed_tx_hash = $1 + AND tx_success IS NOT NULL + AND tx_receipt IS NOT NULL`, psd.Hash).Scan(&txReceiptJSON, &txSuccess) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return xerrors.Errorf("tx hash %s is either missing from watch table or is not yet processed by watcher", psd.Hash) + } + return xerrors.Errorf("failed to get tx_receipt for tx %s: %w", psd.Hash, err) + } + + var txReceipt types.Receipt + err = json.Unmarshal(txReceiptJSON, &txReceipt) + if err != nil { + return xerrors.Errorf("failed to unmarshal tx_receipt for tx %s: %w", psd.Hash, err) + } + + if !txSuccess { + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE market_mk20_deal + SET pdp_v1 = jsonb_set( + jsonb_set(pdp_v1, '{error}', to_jsonb($1::text), true), + '{complete}', to_jsonb(true), true + ) + WHERE id = $2;`, "Transaction failed", psd.ID) + if err != nil { + return false, xerrors.Errorf("failed to update market_mk20_deal: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + _, err = tx.Exec(`DELETE FROM pdp_piece_delete WHERE id = $1`, psd.ID) + if err != nil { + return false, xerrors.Errorf("failed to delete row from pdp_piece_delete: %w", err) + } + return true, nil + }) + if err != nil { + return xerrors.Errorf("failed to commit transaction: %w", err) + } + if !comm { + return xerrors.Errorf("failed to commit transaction") + } + return nil + } + + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE pdp_dataset_piece SET removed = TRUE, + remove_deal_id = $1, + remove_message_hash = $2 + WHERE data_set_id = $3 AND piece = ANY($4)`, psd.ID, psd.Hash, psd.DataSet, psd.Pieces) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_dataset_piece: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + n, err = tx.Exec(`UPDATE market_mk20_deal + SET pdp_v1 = jsonb_set(pdp_v1, '{complete}', 'true'::jsonb, true) + WHERE id = $1;`, psd.ID) + if err != nil { + return false, xerrors.Errorf("failed to update market_mk20_deal: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + _, err = tx.Exec(`DELETE FROM pdp_piece_delete WHERE id = $1`, psd.ID) + if err != nil { + return false, xerrors.Errorf("failed to delete row from pdp_piece_delete: %w", err) + } + _, err = tx.Exec(`INSERT INTO piece_cleanup (id, piece_cid_v2, pdp) + SELECT p.add_deal_id, p.piece_cid_v2, TRUE + FROM pdp_dataset_piece AS p + WHERE p.data_set_id = $1 + AND p.piece = ANY($2) + ON CONFLICT (id, pdp) DO NOTHING;`, psd.DataSet, psd.Pieces) + if err != nil { + return false, xerrors.Errorf("failed to insert into piece_cleanup: %w", err) + } + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return xerrors.Errorf("failed to commit transaction: %w", err) + } + if !comm { + return xerrors.Errorf("failed to commit transaction") + } + return nil +} diff --git a/tasks/pdp/proofset_addroot_watch.go b/tasks/pdp/proofset_addroot_watch.go deleted file mode 100644 index db042dd5f..000000000 --- a/tasks/pdp/proofset_addroot_watch.go +++ /dev/null @@ -1,234 +0,0 @@ -package pdp - -import ( - "context" - "encoding/json" - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "golang.org/x/xerrors" - - "github.com/filecoin-project/curio/harmony/harmonydb" - "github.com/filecoin-project/curio/lib/chainsched" - "github.com/filecoin-project/curio/pdp/contract" - - chainTypes "github.com/filecoin-project/lotus/chain/types" -) - -// Structures to represent database records -type ProofSetRootAdd struct { - ProofSet uint64 `db:"proofset"` - AddMessageHash string `db:"add_message_hash"` -} - -// RootAddEntry represents entries from pdp_proofset_root_adds -type RootAddEntry struct { - ProofSet uint64 `db:"proofset"` - Root string `db:"root"` - AddMessageHash string `db:"add_message_hash"` - AddMessageIndex uint64 `db:"add_message_index"` - Subroot string `db:"subroot"` - SubrootOffset int64 `db:"subroot_offset"` - SubrootSize int64 `db:"subroot_size"` - PDPPieceRefID int64 `db:"pdp_pieceref"` - AddMessageOK *bool `db:"add_message_ok"` - PDPProofSetID uint64 `db:"proofset"` -} - -// NewWatcherRootAdd sets up the watcher for proof set root additions -func NewWatcherRootAdd(db *harmonydb.DB, ethClient *ethclient.Client, pcs *chainsched.CurioChainSched) { - if err := pcs.AddHandler(func(ctx context.Context, revert, apply *chainTypes.TipSet) error { - err := processPendingProofSetRootAdds(ctx, db, ethClient) - if err != nil { - log.Warnf("Failed to process pending proof set root adds: %v", err) - } - - return nil - }); err != nil { - panic(err) - } -} - -// processPendingProofSetRootAdds processes root additions that have been confirmed on-chain -func processPendingProofSetRootAdds(ctx context.Context, db *harmonydb.DB, ethClient *ethclient.Client) error { - // Query for pdp_proofset_root_adds entries where add_message_ok = TRUE - var rootAdds []ProofSetRootAdd - - err := db.Select(ctx, &rootAdds, ` - SELECT DISTINCT proofset, add_message_hash - FROM pdp_proofset_root_adds - WHERE add_message_ok = TRUE AND roots_added = FALSE - `) - if err != nil { - return xerrors.Errorf("failed to select proof set root adds: %w", err) - } - - if len(rootAdds) == 0 { - // No pending root adds - return nil - } - - // Process each root addition - for _, rootAdd := range rootAdds { - err := processProofSetRootAdd(ctx, db, ethClient, rootAdd) - if err != nil { - log.Warnf("Failed to process root add for tx %s: %v", rootAdd.AddMessageHash, err) - continue - } - } - - return nil -} - -func processProofSetRootAdd(ctx context.Context, db *harmonydb.DB, ethClient *ethclient.Client, rootAdd ProofSetRootAdd) error { - // Retrieve the tx_receipt from message_waits_eth - var txReceiptJSON []byte - err := db.QueryRow(ctx, ` - SELECT tx_receipt - FROM message_waits_eth - WHERE signed_tx_hash = $1 - `, rootAdd.AddMessageHash).Scan(&txReceiptJSON) - if err != nil { - return xerrors.Errorf("failed to get tx_receipt for tx %s: %w", rootAdd.AddMessageHash, err) - } - - // Unmarshal the tx_receipt JSON into types.Receipt - var txReceipt types.Receipt - err = json.Unmarshal(txReceiptJSON, &txReceipt) - if err != nil { - return xerrors.Errorf("failed to unmarshal tx_receipt for tx %s: %w", rootAdd.AddMessageHash, err) - } - - // Parse the logs to extract root IDs and other data - err = extractAndInsertRootsFromReceipt(ctx, db, &txReceipt, rootAdd) - if err != nil { - return xerrors.Errorf("failed to extract roots from receipt for tx %s: %w", rootAdd.AddMessageHash, err) - } - - return nil -} - -func extractAndInsertRootsFromReceipt(ctx context.Context, db *harmonydb.DB, receipt *types.Receipt, rootAdd ProofSetRootAdd) error { - // Get the ABI from the contract metadata - pdpABI, err := contract.PDPVerifierMetaData.GetAbi() - if err != nil { - return fmt.Errorf("failed to get PDP ABI: %w", err) - } - - // Get the event definition - event, exists := pdpABI.Events["RootsAdded"] - if !exists { - return fmt.Errorf("RootsAdded event not found in ABI") - } - - var rootIds []uint64 - eventFound := false - - // Iterate over the logs in the receipt - for _, vLog := range receipt.Logs { - // Check if the log corresponds to the RootsAdded event - if len(vLog.Topics) > 0 && vLog.Topics[0] == event.ID { - // The setId is an indexed parameter in Topics[1], but we don't need it here - // as we already have the proofset ID from the database - - // Parse the non-indexed parameter (rootIds array) from the data - unpacked, err := event.Inputs.Unpack(vLog.Data) - if err != nil { - return fmt.Errorf("failed to unpack log data: %w", err) - } - - // Extract the rootIds array - if len(unpacked) == 0 { - return fmt.Errorf("no unpacked data found in log") - } - - // Convert the unpacked rootIds ([]interface{} containing *big.Int) to []uint64 - bigIntRootIds, ok := unpacked[0].([]*big.Int) - if !ok { - return fmt.Errorf("failed to convert unpacked data to array") - } - - rootIds = make([]uint64, len(bigIntRootIds)) - for i := range bigIntRootIds { - rootIds[i] = bigIntRootIds[i].Uint64() - } - - eventFound = true - // We found the event, so we can break the loop - break - } - } - - if !eventFound { - return fmt.Errorf("RootsAdded event not found in receipt") - } - - // Now we have the firstAdded rootId, proceed with database operations - - // Begin a database transaction - _, err = db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { - // Fetch the entries from pdp_proofset_root_adds - var rootAddEntries []RootAddEntry - err := tx.Select(&rootAddEntries, ` - SELECT proofset, root, add_message_hash, add_message_index, subroot, subroot_offset, subroot_size, pdp_pieceref - FROM pdp_proofset_root_adds - WHERE proofset = $1 AND add_message_hash = $2 - ORDER BY add_message_index ASC, subroot_offset ASC - `, rootAdd.ProofSet, rootAdd.AddMessageHash) - if err != nil { - return false, fmt.Errorf("failed to select from pdp_proofset_root_adds: %w", err) - } - - // For each entry, use the corresponding rootId from the event - for _, entry := range rootAddEntries { - if entry.AddMessageIndex >= uint64(len(rootIds)) { - return false, fmt.Errorf("index out of bounds: entry index %d exceeds rootIds length %d", - entry.AddMessageIndex, len(rootIds)) - } - - rootId := rootIds[entry.AddMessageIndex] - // Insert into pdp_proofset_roots - _, err := tx.Exec(` - INSERT INTO pdp_proofset_roots ( - proofset, - root, - root_id, - subroot, - subroot_offset, - subroot_size, - pdp_pieceref, - add_message_hash, - add_message_index - ) VALUES ( - $1, $2, $3, $4, $5, $6, $7, $8, $9 - ) - `, entry.ProofSet, entry.Root, rootId, entry.Subroot, entry.SubrootOffset, entry.SubrootSize, entry.PDPPieceRefID, entry.AddMessageHash, entry.AddMessageIndex) - if err != nil { - return false, fmt.Errorf("failed to insert into pdp_proofset_roots: %w", err) - } - } - - // Mark as processed in pdp_proofset_root_adds (don't delete, for transaction tracking) - rowsAffected, err := tx.Exec(` - UPDATE pdp_proofset_root_adds - SET roots_added = TRUE - WHERE proofset = $1 AND add_message_hash = $2 AND roots_added = FALSE - `, rootAdd.ProofSet, rootAdd.AddMessageHash) - if err != nil { - return false, fmt.Errorf("failed to update pdp_proofset_root_adds: %w", err) - } - - if int(rowsAffected) != len(rootAddEntries) { - return false, fmt.Errorf("expected to update %d rows in pdp_proofset_root_adds but updated %d", len(rootAddEntries), rowsAffected) - } - - return true, nil - }) - if err != nil { - return fmt.Errorf("failed to process root additions in DB: %w", err) - } - - return nil -} diff --git a/tasks/pdp/proofset_create_watch.go b/tasks/pdp/proofset_create_watch.go deleted file mode 100644 index 2ed78883d..000000000 --- a/tasks/pdp/proofset_create_watch.go +++ /dev/null @@ -1,194 +0,0 @@ -package pdp - -import ( - "context" - "encoding/json" - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "golang.org/x/xerrors" - - "github.com/filecoin-project/curio/harmony/harmonydb" - "github.com/filecoin-project/curio/lib/chainsched" - "github.com/filecoin-project/curio/pdp/contract" - - chainTypes "github.com/filecoin-project/lotus/chain/types" -) - -type ProofSetCreate struct { - CreateMessageHash string `db:"create_message_hash"` - Service string `db:"service"` -} - -func NewWatcherCreate(db *harmonydb.DB, ethClient *ethclient.Client, pcs *chainsched.CurioChainSched) { - if err := pcs.AddHandler(func(ctx context.Context, revert, apply *chainTypes.TipSet) error { - err := processPendingProofSetCreates(ctx, db, ethClient) - if err != nil { - log.Warnf("Failed to process pending proof set creates: %v", err) - } - return nil - }); err != nil { - panic(err) - } -} - -func processPendingProofSetCreates(ctx context.Context, db *harmonydb.DB, ethClient *ethclient.Client) error { - // Query for pdp_proofset_creates entries where ok = TRUE and proofset_created = FALSE - var proofSetCreates []ProofSetCreate - - err := db.Select(ctx, &proofSetCreates, ` - SELECT create_message_hash, service - FROM pdp_proofset_creates - WHERE ok = TRUE AND proofset_created = FALSE - `) - if err != nil { - return xerrors.Errorf("failed to select proof set creates: %w", err) - } - - log.Infow("ProofSetCreate watcher checking pending proof sets", "count", len(proofSetCreates)) - - if len(proofSetCreates) == 0 { - // No pending proof set creates - return nil - } - - // Process each proof set create - for _, psc := range proofSetCreates { - log.Infow("Processing proof set create", - "txHash", psc.CreateMessageHash, - "service", psc.Service) - err := processProofSetCreate(ctx, db, psc, ethClient) - if err != nil { - log.Warnf("Failed to process proof set create for tx %s: %v", psc.CreateMessageHash, err) - continue - } - log.Infow("Successfully processed proof set create", "txHash", psc.CreateMessageHash) - } - - return nil -} - -func processProofSetCreate(ctx context.Context, db *harmonydb.DB, psc ProofSetCreate, ethClient *ethclient.Client) error { - // Retrieve the tx_receipt from message_waits_eth - var txReceiptJSON []byte - log.Debugw("Fetching tx_receipt from message_waits_eth", "txHash", psc.CreateMessageHash) - err := db.QueryRow(ctx, ` - SELECT tx_receipt - FROM message_waits_eth - WHERE signed_tx_hash = $1 - `, psc.CreateMessageHash).Scan(&txReceiptJSON) - if err != nil { - return xerrors.Errorf("failed to get tx_receipt for tx %s: %w", psc.CreateMessageHash, err) - } - log.Debugw("Retrieved tx_receipt", "txHash", psc.CreateMessageHash, "receiptLength", len(txReceiptJSON)) - - // Unmarshal the tx_receipt JSON into types.Receipt - var txReceipt types.Receipt - err = json.Unmarshal(txReceiptJSON, &txReceipt) - if err != nil { - return xerrors.Errorf("failed to unmarshal tx_receipt for tx %s: %w", psc.CreateMessageHash, err) - } - log.Debugw("Unmarshalled receipt", "txHash", psc.CreateMessageHash, "status", txReceipt.Status, "logs", len(txReceipt.Logs)) - - // Parse the logs to extract the proofSetId - proofSetId, err := extractProofSetIdFromReceipt(&txReceipt) - if err != nil { - return xerrors.Errorf("failed to extract proofSetId from receipt for tx %s: %w", psc.CreateMessageHash, err) - } - log.Infow("Extracted proofSetId from receipt", "txHash", psc.CreateMessageHash, "proofSetId", proofSetId) - - // Get the listener address for this proof set from the PDPVerifier contract - pdpVerifier, err := contract.NewPDPVerifier(contract.ContractAddresses().PDPVerifier, ethClient) - if err != nil { - return xerrors.Errorf("failed to instantiate PDPVerifier contract: %w", err) - } - - listenerAddr, err := pdpVerifier.GetProofSetListener(nil, big.NewInt(int64(proofSetId))) - if err != nil { - return xerrors.Errorf("failed to get listener address for proof set %d: %w", proofSetId, err) - } - - // Get the proving period from the listener - // Assumption: listener is a PDP Service with proving window informational methods - provingPeriod, challengeWindow, err := getProvingPeriodChallengeWindow(ctx, ethClient, listenerAddr) - if err != nil { - return xerrors.Errorf("failed to get max proving period: %w", err) - } - - // Insert a new entry into pdp_proof_sets - err = insertProofSet(ctx, db, psc.CreateMessageHash, proofSetId, psc.Service, provingPeriod, challengeWindow) - if err != nil { - return xerrors.Errorf("failed to insert proof set %d for tx %+v: %w", proofSetId, psc, err) - } - - // Update pdp_proofset_creates to set proofset_created = TRUE - _, err = db.Exec(ctx, ` - UPDATE pdp_proofset_creates - SET proofset_created = TRUE - WHERE create_message_hash = $1 - `, psc.CreateMessageHash) - if err != nil { - return xerrors.Errorf("failed to update proofset_creates for tx %s: %w", psc.CreateMessageHash, err) - } - - return nil -} - -func extractProofSetIdFromReceipt(receipt *types.Receipt) (uint64, error) { - pdpABI, err := contract.PDPVerifierMetaData.GetAbi() - if err != nil { - return 0, xerrors.Errorf("failed to get PDP ABI: %w", err) - } - - event, exists := pdpABI.Events["ProofSetCreated"] - if !exists { - return 0, xerrors.Errorf("ProofSetCreated event not found in ABI") - } - - for _, vLog := range receipt.Logs { - if len(vLog.Topics) > 0 && vLog.Topics[0] == event.ID { - if len(vLog.Topics) < 2 { - return 0, xerrors.Errorf("log does not contain setId topic") - } - - setIdBigInt := new(big.Int).SetBytes(vLog.Topics[1].Bytes()) - return setIdBigInt.Uint64(), nil - } - } - - return 0, xerrors.Errorf("ProofSetCreated event not found in receipt") -} - -func insertProofSet(ctx context.Context, db *harmonydb.DB, createMsg string, proofSetId uint64, service string, provingPeriod uint64, challengeWindow uint64) error { - // Implement the insertion into pdp_proof_sets table - // Adjust the SQL statement based on your table schema - _, err := db.Exec(ctx, ` - INSERT INTO pdp_proof_sets (id, create_message_hash, service, proving_period, challenge_window) - VALUES ($1, $2, $3, $4, $5) - `, proofSetId, createMsg, service, provingPeriod, challengeWindow) - return err -} - -func getProvingPeriodChallengeWindow(ctx context.Context, ethClient *ethclient.Client, listenerAddr common.Address) (uint64, uint64, error) { - // ProvingPeriod - schedule, err := contract.NewIPDPProvingSchedule(listenerAddr, ethClient) - if err != nil { - return 0, 0, xerrors.Errorf("failed to create proving schedule binding, check that listener has proving schedule methods: %w", err) - } - - period, err := schedule.GetMaxProvingPeriod(&bind.CallOpts{Context: ctx}) - if err != nil { - return 0, 0, xerrors.Errorf("failed to get proving period: %w", err) - } - - // ChallengeWindow - challengeWindow, err := schedule.ChallengeWindow(&bind.CallOpts{Context: ctx}) - if err != nil { - return 0, 0, xerrors.Errorf("failed to get challenge window: %w", err) - } - - return period, challengeWindow.Uint64(), nil -} diff --git a/tasks/pdp/task_add_data_set.go b/tasks/pdp/task_add_data_set.go new file mode 100644 index 000000000..226841b08 --- /dev/null +++ b/tasks/pdp/task_add_data_set.go @@ -0,0 +1,201 @@ +package pdp + +import ( + "context" + "errors" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/pdp/contract" + "github.com/filecoin-project/curio/tasks/message" +) + +type PDPTaskAddDataSet struct { + db *harmonydb.DB + sender *message.SenderETH + ethClient *ethclient.Client + filClient PDPServiceNodeApi +} + +func NewPDPTaskAddDataSet(db *harmonydb.DB, sender *message.SenderETH, ethClient *ethclient.Client, filClient PDPServiceNodeApi) *PDPTaskAddDataSet { + return &PDPTaskAddDataSet{ + db: db, + sender: sender, + ethClient: ethClient, + filClient: filClient, + } +} + +func (p *PDPTaskAddDataSet) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + var pcreates []struct { + RecordKeeper string `db:"record_keeper"` + ExtraData []byte `db:"extra_data"` + } + + err = p.db.Select(ctx, &pcreates, `SELECT record_keeper, extra_data FROM pdp_data_set_create WHERE task_id = $1 AND tx_hash IS NULL`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to get task details from DB: %w", err) + } + + if len(pcreates) != 1 { + return false, xerrors.Errorf("incorrect rows for dataset create found for taskID %d", taskID) + } + + pcreate := pcreates[0] + + recordKeeperAddr := common.HexToAddress(pcreate.RecordKeeper) + if recordKeeperAddr == (common.Address{}) { + return false, xerrors.Errorf("invalid record keeper address: %s", pcreate.RecordKeeper) + } + + extraDataBytes := []byte{} + + if pcreate.ExtraData != nil { + extraDataBytes = pcreate.ExtraData + } + + // Get the sender address from 'eth_keys' table where role = 'pdp' limit 1 + fromAddress, err := p.getSenderAddress(ctx) + if err != nil { + return false, xerrors.Errorf("failed to get sender address: %w", err) + } + + // Manually create the transaction without requiring a Signer + // Obtain the ABI of the PDPVerifier contract + abiData, err := contract.PDPVerifierMetaData.GetAbi() + if err != nil { + return false, xerrors.Errorf("getting PDPVerifier ABI: %w", err) + } + + // Pack the method call data + data, err := abiData.Pack("createDataSet", recordKeeperAddr, extraDataBytes) + if err != nil { + return false, xerrors.Errorf("packing data: %w", err) + } + + // Prepare the transaction (nonce will be set to 0, SenderETH will assign it) + tx := types.NewTransaction( + 0, + contract.ContractAddresses().PDPVerifier, + contract.SybilFee(), + 0, + nil, + data, + ) + + // Send the transaction using SenderETH + reason := "pdp-create-data-set" + txHash, err := p.sender.Send(ctx, fromAddress, tx, reason) + if err != nil { + return false, xerrors.Errorf("sending transaction: %w", err) + } + + // Insert into message_waits_eth and pdp_data_set_create + txHashLower := strings.ToLower(txHash.Hex()) + comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE pdp_data_set_create SET tx_hash = $1, task_id = NULL WHERE task_id = $2`, txHashLower, taskID) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_data_set_create: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("incorrect number of rows updated for pdp_data_set_create: %d", n) + } + _, err = tx.Exec(`INSERT INTO message_waits_eth (signed_tx_hash, tx_status) VALUES ($1, $2)`, txHashLower, "pending") + if err != nil { + return false, xerrors.Errorf("failed to insert into message_waits_eth: %w", err) + } + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return false, xerrors.Errorf("failed to commit transaction: %w", err) + } + + if !comm { + return false, xerrors.Errorf("failed to commit transaction") + } + + return true, nil +} + +func (p *PDPTaskAddDataSet) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + return &ids[0], nil +} + +func (p *PDPTaskAddDataSet) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(50), + Name: "PDPAddDataSet", + Cost: resources.Resources{ + Cpu: 1, + Ram: 64 << 20, + }, + MaxFailures: 3, + IAmBored: passcall.Every(3*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return p.schedule(context.Background(), taskFunc) + }), + } +} + +func (p *PDPTaskAddDataSet) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var did string + err := tx.QueryRow(`SELECT id FROM pdp_data_set_create WHERE task_id IS NULL AND tx_hash IS NULL LIMIT 1`).Scan(&did) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, nil + } + return false, xerrors.Errorf("failed to query pdp_data_set_create: %w", err) + } + if did == "" { + return false, xerrors.Errorf("no valid id found for taskID") + } + + _, err = tx.Exec(`UPDATE pdp_data_set_create SET task_id = $1 WHERE id = $2 AND tx_hash IS NULL`, id, did) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_data_set_create: %w", err) + } + + stop = false // we found a task to schedule, keep going + return true, nil + }) + + } + + return nil +} + +// getSenderAddress retrieves the sender address from the database where role = 'pdp' limit 1 +func (p *PDPTaskAddDataSet) getSenderAddress(ctx context.Context) (common.Address, error) { + var addressStr string + err := p.db.QueryRow(ctx, `SELECT address FROM eth_keys WHERE role = 'pdp' LIMIT 1`).Scan(&addressStr) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return common.Address{}, errors.New("no sender address with role 'pdp' found") + } + return common.Address{}, err + } + address := common.HexToAddress(addressStr) + return address, nil +} + +func (p *PDPTaskAddDataSet) Adder(taskFunc harmonytask.AddTaskFunc) {} + +var _ harmonytask.TaskInterface = &PDPTaskAddDataSet{} +var _ = harmonytask.Reg(&PDPTaskAddDataSet{}) diff --git a/tasks/pdp/task_add_piece.go b/tasks/pdp/task_add_piece.go new file mode 100644 index 000000000..b22b4b153 --- /dev/null +++ b/tasks/pdp/task_add_piece.go @@ -0,0 +1,227 @@ +package pdp + +import ( + "context" + "errors" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ipfs/go-cid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/pdp/contract" + "github.com/filecoin-project/curio/tasks/message" + + types2 "github.com/filecoin-project/lotus/chain/types" +) + +type PDPServiceNodeApi interface { + ChainHead(ctx context.Context) (*types2.TipSet, error) +} + +type PDPTaskAddPiece struct { + db *harmonydb.DB + sender *message.SenderETH + ethClient *ethclient.Client +} + +func NewPDPTaskAddPiece(db *harmonydb.DB, sender *message.SenderETH, ethClient *ethclient.Client) *PDPTaskAddPiece { + return &PDPTaskAddPiece{ + db: db, + sender: sender, + ethClient: ethClient, + } +} + +func (p *PDPTaskAddPiece) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var addPieces []struct { + ID string `db:"id"` + PieceCid2 string `db:"piece_cid_v2"` + DataSetID int64 `db:"data_set_id"` + ExtraData []byte `db:"extra_data"` + PieceRef string `db:"piece_ref"` + } + + err = p.db.Select(ctx, &addPieces, `SELECT id, piece_cid_v2, data_set_id, extra_data, piece_ref FROM pdp_pipeline WHERE add_piece_task_id = $1 AND after_add_piece = FALSE`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to select add piece: %w", err) + } + + if len(addPieces) == 0 { + return false, xerrors.Errorf("no add piece found for taskID %d", taskID) + } + + if len(addPieces) > 1 { + return false, xerrors.Errorf("multiple add piece found for taskID %d", taskID) + } + + addPiece := addPieces[0] + + pcid2, err := cid.Parse(addPiece.PieceCid2) + if err != nil { + return false, xerrors.Errorf("failed to parse piece cid: %w", err) + } + + // Prepare the Ethereum transaction data outside the DB transaction + // Obtain the ABI of the PDPVerifier contract + abiData, err := contract.PDPVerifierMetaData.GetAbi() + if err != nil { + return false, xerrors.Errorf("getting PDPVerifier ABI: %w", err) + } + + pieceDataArray := []contract.CidsCid{ + { + Data: pcid2.Bytes(), + }, + } + + dataSetID := new(big.Int).SetUint64(uint64(addPiece.DataSetID)) + + // Prepare the Ethereum transaction + // Pack the method call data + // The extraDataBytes variable is now correctly populated above + data, err := abiData.Pack("addPieces", dataSetID, pieceDataArray, addPiece.ExtraData) + if err != nil { + return false, xerrors.Errorf("packing data: %w", err) + } + + callOpts := &bind.CallOpts{ + Context: ctx, + } + + pdpVerifierAddress := contract.ContractAddresses().PDPVerifier + + pdpVerifier, err := contract.NewPDPVerifier(pdpVerifierAddress, p.ethClient) + if err != nil { + return false, xerrors.Errorf("failed to instantiate PDPVerifier contract at %s: %w", pdpVerifierAddress.Hex(), err) + } + + // Get the sender address for this dataset + owner, _, err := pdpVerifier.GetDataSetStorageProvider(callOpts, dataSetID) + if err != nil { + return false, xerrors.Errorf("failed to get owner: %w", err) + } + + // Prepare the transaction (nonce will be set to 0, SenderETH will assign it) + txEth := types.NewTransaction( + 0, + contract.ContractAddresses().PDPVerifier, + big.NewInt(0), + 0, + nil, + data, + ) + + // Send the transaction using SenderETH + reason := "pdp-add-piece" + txHash, err := p.sender.Send(ctx, owner, txEth, reason) + if err != nil { + return false, xerrors.Errorf("sending transaction: %w", err) + } + + txHashLower := strings.ToLower(txHash.Hex()) + + // Insert into message_waits_eth and pdp_dataset_piece + _, err = p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { + // Insert into message_waits_eth + _, err = tx.Exec(` + INSERT INTO message_waits_eth (signed_tx_hash, tx_status) + VALUES ($1, $2) + `, txHashLower, "pending") + if err != nil { + return false, xerrors.Errorf("failed to insert into message_waits_eth: %w", err) + } + + n, err := tx.Exec(`UPDATE pdp_pipeline SET + after_add_piece = TRUE, + add_piece_task_id = NULL, + add_message_hash = $2 + WHERE add_piece_task_id = $1`, taskID, txHashLower) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("incorrect number of rows updated for pdp_pipeline: %d", n) + } + + // Return true to commit the transaction + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("failed to save details to DB: %w", err) + } + return true, nil +} + +func (p *PDPTaskAddPiece) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + return &ids[0], nil +} + +func (p *PDPTaskAddPiece) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(50), + Name: "PDPAddPiece", + Cost: resources.Resources{ + Cpu: 1, + Ram: 64 << 20, + }, + MaxFailures: 3, + IAmBored: passcall.Every(5*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return p.schedule(context.Background(), taskFunc) + }), + } +} + +func (p *PDPTaskAddPiece) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var did string + err := tx.QueryRow(`SELECT id FROM pdp_pipeline + WHERE add_piece_task_id IS NULL + AND after_add_piece = FALSE + AND after_add_piece_msg = FALSE + AND aggregated = TRUE + LIMIT 1`).Scan(&did) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, nil + } + return false, xerrors.Errorf("failed to query pdp_pipeline: %w", err) + } + if did == "" { + return false, xerrors.Errorf("no valid deal ID found for scheduling") + } + + _, err = tx.Exec(`UPDATE pdp_pipeline SET add_piece_task_id = $1 WHERE id = $2 AND after_add_piece = FALSE AND after_add_piece_msg = FALSE AND aggregated = TRUE`, id, did) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_pipeline: %w", err) + } + + stop = false // we found a task to schedule, keep going + return true, nil + }) + + } + + return nil +} + +func (p *PDPTaskAddPiece) Adder(taskFunc harmonytask.AddTaskFunc) {} + +var _ harmonytask.TaskInterface = &PDPTaskAddPiece{} +var _ = harmonytask.Reg(&PDPTaskAddPiece{}) diff --git a/tasks/pdp/task_aggregation.go b/tasks/pdp/task_aggregation.go new file mode 100644 index 000000000..87c041f4b --- /dev/null +++ b/tasks/pdp/task_aggregation.go @@ -0,0 +1,395 @@ +package pdp + +import ( + "context" + "errors" + "fmt" + "io" + "math/bits" + "time" + + "github.com/ipfs/go-cid" + "github.com/oklog/ulid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-data-segment/datasegment" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/market/mk20" +) + +type AggregatePDPDealTask struct { + db *harmonydb.DB + sc *ffi.SealCalls +} + +func NewAggregatePDPDealTask(db *harmonydb.DB, sc *ffi.SealCalls) *AggregatePDPDealTask { + return &AggregatePDPDealTask{ + db: db, + sc: sc, + } +} + +func (a *AggregatePDPDealTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var pieces []struct { + PieceCidV2 string `db:"piece_cid_v2"` + PieceRef int64 `db:"piece_ref"` + ID string `db:"id"` + AggrIndex int `db:"aggr_index"` + Aggregated bool `db:"aggregated"` + Aggregation int `db:"deal_aggregation"` + } + + err = a.db.Select(ctx, &pieces, ` + SELECT + piece_cid_v2, + piece_ref, + id, + aggr_index, + aggregated, + deal_aggregation + FROM + pdp_pipeline + WHERE + agg_task_id = $1 ORDER BY aggr_index ASC`, taskID) + if err != nil { + return false, xerrors.Errorf("getting piece details: %w", err) + } + + if len(pieces) == 0 { + return false, xerrors.Errorf("no pieces to aggregate for task %d", taskID) + } + + if len(pieces) == 1 { + n, err := a.db.Exec(ctx, `UPDATE pdp_pipeline SET aggregated = TRUE, agg_task_id = NULL + WHERE id = $1 + AND agg_task_id = $2`, pieces[0].ID, taskID) + if err != nil { + return false, xerrors.Errorf("updating aggregated piece details in DB: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row updated, got %d", n) + } + log.Infof("skipping aggregation as deal %s only has 1 piece for task %s", pieces[0].ID, taskID) + return true, nil + } + + id := pieces[0].ID + + ID, err := ulid.Parse(id) + if err != nil { + return false, xerrors.Errorf("parsing deal ID: %w", err) + } + + deal, err := mk20.DealFromDB(ctx, a.db, ID) + if err != nil { + return false, xerrors.Errorf("getting deal details from DB: %w", err) + } + + pi, err := deal.PieceInfo() + if err != nil { + return false, xerrors.Errorf("getting piece info: %w", err) + } + + var pinfos []abi.PieceInfo + var readers []io.Reader + + var refIDs []int64 + + for _, piece := range pieces { + if piece.Aggregated { + return false, xerrors.Errorf("piece %s for deal %s already aggregated for task %d", piece.PieceCidV2, piece.ID, taskID) + } + if piece.Aggregation != 1 { + return false, xerrors.Errorf("incorrect aggregation value for piece %s for deal %s for task %d", piece.PieceCidV2, piece.ID, taskID) + } + if piece.ID != id { + return false, xerrors.Errorf("piece details do not match") + } + + var reader io.Reader // io.ReadCloser is not supported by padreader + var closer io.Closer + + // get pieceID + var pieceID []struct { + PieceID storiface.PieceNumber `db:"piece_id"` + } + err = a.db.Select(ctx, &pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, piece.PieceRef) + if err != nil { + return false, xerrors.Errorf("getting pieceID: %w", err) + } + + if len(pieceID) != 1 { + return false, xerrors.Errorf("expected 1 pieceID, got %d", len(pieceID)) + } + + pr, err := a.sc.PieceReader(ctx, pieceID[0].PieceID) + if err != nil { + return false, xerrors.Errorf("getting piece reader: %w", err) + } + + closer = pr + reader = pr + defer func() { + _ = closer.Close() + }() + + pcid2, err := cid.Parse(piece.PieceCidV2) + if err != nil { + return false, xerrors.Errorf("parsing piece cid: %w", err) + } + + pinfo, err := mk20.GetPieceInfo(pcid2) + if err != nil { + return false, xerrors.Errorf("getting piece info: %w", err) + } + + pinfos = append(pinfos, abi.PieceInfo{ + Size: pinfo.Size, + PieceCID: pinfo.PieceCIDV1, + }) + + readers = append(readers, io.LimitReader(reader, int64(pinfo.RawSize))) + refIDs = append(refIDs, piece.PieceRef) + } + + _, aggregatedRawSize, err := datasegment.ComputeDealPlacement(pinfos) + if err != nil { + return false, xerrors.Errorf("computing aggregated piece size: %w", err) + } + + overallSize := abi.PaddedPieceSize(aggregatedRawSize) + // we need to make this the 'next' power of 2 in order to have space for the index + next := 1 << (64 - bits.LeadingZeros64(uint64(overallSize+256))) + + aggr, err := datasegment.NewAggregate(abi.PaddedPieceSize(next), pinfos) + if err != nil { + return false, xerrors.Errorf("creating aggregate: %w", err) + } + + outR, err := aggr.AggregateObjectReader(readers) + if err != nil { + return false, xerrors.Errorf("aggregating piece readers: %w", err) + } + + var parkedPieceID, pieceRefID int64 + var pieceParked bool + + comm, err := a.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + // TODO: Review this logic for incomplete pieces + // Check if we already have the piece, if found then verify access and skip rest of the processing + var pid int64 + var complete bool + err = tx.QueryRow(`SELECT id, complete FROM parked_pieces WHERE piece_cid = $1 AND piece_padded_size = $2 AND long_term = TRUE`, pi.PieceCIDV1.String(), pi.Size).Scan(&pid, &complete) + if err == nil { + // If piece exists then check if we can access the data + pr, err := a.sc.PieceReader(ctx, storiface.PieceNumber(pid)) + if err != nil { + // If piece does not exist then we will park it otherwise fail here + if !errors.Is(err, storiface.ErrSectorNotFound) { + // We should fail here because any subsequent operation which requires access to data will also fail + // till this error is fixed + return false, fmt.Errorf("failed to get piece reader: %w", err) + } + } + defer func() { + _ = pr.Close() + }() + pieceParked = true + parkedPieceID = pid + } else { + if !errors.Is(err, pgx.ErrNoRows) { + return false, fmt.Errorf("failed to check if piece already exists: %w", err) + } + // If piece does not exist then let's create one + err = tx.QueryRow(` + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term, skip) + VALUES ($1, $2, $3, TRUE, TRUE) RETURNING id`, + pi.PieceCIDV1.String(), pi.Size, pi.RawSize).Scan(&parkedPieceID) + if err != nil { + return false, fmt.Errorf("failed to create parked_pieces entry: %w", err) + } + } + + err = tx.QueryRow(` + INSERT INTO parked_piece_refs (piece_id, data_url, long_term) + VALUES ($1, $2, TRUE) RETURNING ref_id + `, parkedPieceID, "/Aggregate").Scan(&pieceRefID) + if err != nil { + return false, fmt.Errorf("failed to create parked_piece_refs entry: %w", err) + } + + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("saving aggregated chunk details to DB: %w", err) + } + + if !comm { + return false, xerrors.Errorf("failed to commit the transaction") + } + + failed := true + + // Clean up piece park tables in case of failure + // TODO: Figure out if there is a race condition with cleanup task + defer func() { + if failed { + _, ferr := a.db.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = $1`, pieceRefID) + if ferr != nil { + log.Errorf("failed to delete parked_piece_refs entry: %w", ferr) + } + } + }() + + // Write piece if not already complete + if !pieceParked { + upi, _, err := a.sc.WriteUploadPiece(ctx, storiface.PieceNumber(parkedPieceID), int64(pi.RawSize), outR, storiface.PathStorage, true) + if err != nil { + return false, xerrors.Errorf("writing aggregated piece data to storage: %w", err) + } + + if !upi.PieceCID.Equals(pi.PieceCIDV1) { + return false, xerrors.Errorf("commP mismatch calculated %s and supplied %s", upi.PieceCID.String(), pi.PieceCIDV1.String()) + } + + if upi.Size != pi.Size { + return false, xerrors.Errorf("commP size mismatch calculated %d and supplied %d", upi.Size, pi.Size) + } + } + + comm, err = a.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + // Replace the pipeline piece with a new aggregated piece + _, err = tx.Exec(`DELETE FROM pdp_pipeline WHERE id = $1`, id) + if err != nil { + return false, fmt.Errorf("failed to delete pipeline pieces: %w", err) + } + + _, err = tx.Exec(`DELETE FROM parked_piece_refs WHERE ref_id = ANY($1) AND long_term = FALSE`, refIDs) + if err != nil { + return false, fmt.Errorf("failed to delete parked_piece_refs entries: %w", err) + } + + _, err = tx.Exec(`UPDATE parked_pieces SET complete = true WHERE id = $1 AND complete = FALSE`, parkedPieceID) + if err != nil { + return false, fmt.Errorf("failed to mark piece as complete: %w", err) + } + + pdp := deal.Products.PDPV1 + retv := deal.Products.RetrievalV1 + + n, err := tx.Exec(`INSERT INTO pdp_pipeline ( + id, client, piece_cid_v2, data_set_id, extra_data, piece_ref, + downloaded, deal_aggregation, aggr_index, aggregated, indexing, announce, announce_payload, after_commp) + VALUES ($1, $2, $3, $4, $5, $6, TRUE, $7, 0, TRUE, $8, $9, $10, TRUE)`, + id, deal.Client, deal.Data.PieceCID.String(), *pdp.DataSetID, + pdp.ExtraData, pieceRefID, deal.Data.Format.Aggregate.Type, retv.Indexing, retv.AnnouncePiece, retv.AnnouncePayload) + if err != nil { + return false, xerrors.Errorf("inserting aggregated piece in PDP pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("inserting aggregated piece in PDP pipeline: %d rows affected", n) + } + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("saving aggregated piece details to DB: %w", err) + } + + if !comm { + return false, xerrors.Errorf("failed to commit the transaction") + } + + failed = false + + return true, nil +} + +func (a *AggregatePDPDealTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + // If no local pieceRef was found then just return first TaskID + return &ids[0], nil +} + +func (a *AggregatePDPDealTask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(50), + Name: "AggregatePDPDeal", + Cost: resources.Resources{ + Cpu: 1, + Ram: 4 << 30, + }, + MaxFailures: 3, + IAmBored: passcall.Every(3*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return a.schedule(context.Background(), taskFunc) + }), + } +} + +func (a *AggregatePDPDealTask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var deals []struct { + ID string `db:"id"` + Count int `db:"count"` + } + + err := a.db.Select(ctx, &deals, `SELECT id, COUNT(*) AS count + FROM pdp_pipeline + GROUP BY id + HAVING bool_and(downloaded) + AND bool_and(after_commp) + AND bool_and(NOT aggregated) + AND bool_and(agg_task_id IS NULL);`) + if err != nil { + log.Errorf("getting deals to aggregate: %w", err) + return + } + + if len(deals) == 0 { + return false, nil + } + + deal := deals[0] + + log.Infow("processing aggregation task", "deal", deal.ID, "count", deal.Count) + n, err := tx.Exec(`UPDATE pdp_pipeline SET agg_task_id = $1 + WHERE id = $2 + AND downloaded = TRUE + AND after_commp = TRUE + AND aggregated = FALSE + AND agg_task_id IS NULL`, id, deal.ID) + if err != nil { + return false, xerrors.Errorf("creating aggregation task for PDP: %w", err) + } + + if n == deal.Count { + log.Infow("aggregation task created successfully", "deal", deal.ID) + } + + stop = false + + return n == deal.Count, nil + }) + + } + + return nil +} + +func (a *AggregatePDPDealTask) Adder(taskFunc harmonytask.AddTaskFunc) {} + +var _ = harmonytask.Reg(&AggregatePDPDealTask{}) +var _ harmonytask.TaskInterface = &AggregatePDPDealTask{} diff --git a/tasks/pdp/task_commp.go b/tasks/pdp/task_commp.go new file mode 100644 index 000000000..aa2c244c3 --- /dev/null +++ b/tasks/pdp/task_commp.go @@ -0,0 +1,321 @@ +package pdp + +import ( + "context" + "errors" + "io" + "net/url" + "strconv" + "time" + + "github.com/ipfs/go-cid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-commp-utils/writer" + commcid "github.com/filecoin-project/go-fil-commcid" + commp "github.com/filecoin-project/go-fil-commp-hashhash" + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/market/mk20" +) + +type PDPCommpTask struct { + db *harmonydb.DB + sc *ffi.SealCalls + max int +} + +func NewPDPCommpTask(db *harmonydb.DB, sc *ffi.SealCalls, max int) *PDPCommpTask { + return &PDPCommpTask{ + db: db, + sc: sc, + max: max, + } +} + +func (c *PDPCommpTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var pieces []struct { + Pcid string `db:"piece_cid_v2"` + Ref int64 `db:"piece_ref"` + ID string `db:"id"` + } + + err = c.db.Select(ctx, &pieces, `SELECT id, piece_cid_v2, piece_ref FROM pdp_pipeline + WHERE commp_task_id = $1 + AND downloaded = TRUE;`, taskID) + if err != nil { + return false, xerrors.Errorf("getting piece details: %w", err) + } + if len(pieces) != 1 { + return false, xerrors.Errorf("expected 1 piece, got %d", len(pieces)) + } + piece := pieces[0] + + pcid, err := cid.Parse(piece.Pcid) + if err != nil { + return false, xerrors.Errorf("parsing piece: %w", err) + } + + pi, err := mk20.GetPieceInfo(pcid) + if err != nil { + return false, xerrors.Errorf("getting piece info: %w", err) + } + + // get pieceID + var pieceID []struct { + PieceID storiface.PieceNumber `db:"piece_id"` + } + err = c.db.Select(ctx, &pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, piece.Ref) + if err != nil { + return false, xerrors.Errorf("getting pieceID: %w", err) + } + + if len(pieceID) != 1 { + return false, xerrors.Errorf("expected 1 pieceID, got %d", len(pieceID)) + } + + pr, err := c.sc.PieceReader(ctx, pieceID[0].PieceID) + if err != nil { + return false, xerrors.Errorf("getting piece reader: %w", err) + } + + pReader, pSz := padreader.New(pr, pi.RawSize) + + defer func() { + _ = pr.Close() + }() + + wr := new(commp.Calc) + written, err := io.CopyBuffer(wr, pReader, make([]byte, writer.CommPBuf)) + if err != nil { + return false, xerrors.Errorf("copy into commp writer: %w", err) + } + + if written != int64(pSz) { + return false, xerrors.Errorf("number of bytes written to CommP writer %d not equal to the file size %d", written, pSz) + } + + digest, size, err := wr.Digest() + if err != nil { + return false, xerrors.Errorf("computing commP failed: %w", err) + } + + calculatedCommp, err := commcid.DataCommitmentV1ToCID(digest) + if err != nil { + return false, xerrors.Errorf("computing commP failed: %w", err) + } + + if !calculatedCommp.Equals(pi.PieceCIDV1) { + return false, xerrors.Errorf("commp mismatch: calculated %s and expected %s", calculatedCommp, pi.PieceCIDV1) + } + + if pi.Size != abi.PaddedPieceSize(size) { + return false, xerrors.Errorf("pieceSize mismatch: expected %d, got %d", pi.Size, abi.PaddedPieceSize(size)) + } + + n, err := c.db.Exec(ctx, `UPDATE pdp_pipeline SET after_commp = TRUE, commp_task_id = NULL + WHERE id = $1 + AND piece_cid_v2 = $2 + AND downloaded = TRUE + AND after_commp = FALSE + AND commp_task_id = $3`, + piece.ID, piece.Pcid, taskID) + + if err != nil { + return false, xerrors.Errorf("store commp success: updating pdp pipeline: %w", err) + } + + if n != 1 { + return false, xerrors.Errorf("store commp success: updated %d rows", n) + } + + return true, nil + +} + +func (c *PDPCommpTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + // CommP task can be of 2 types + // 1. Using ParkPiece pieceRef + // 2. Using remote HTTP reader + // ParkPiece should be scheduled on same node which has the piece + // Remote HTTP ones can be scheduled on any node + + ctx := context.Background() + + var tasks []struct { + TaskID harmonytask.TaskID `db:"commp_task_id"` + StorageID string `db:"storage_id"` + Url *string `db:"url"` + } + + indIDs := make([]int64, len(ids)) + for i, id := range ids { + indIDs[i] = int64(id) + } + + comm, err := c.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + err = tx.Select(&tasks, ` SELECT + commp_task_id, + url + FROM + market_mk12_deal_pipeline + WHERE + commp_task_id = ANY ($1) + + UNION ALL + + SELECT + commp_task_id, + url + FROM + market_mk20_pipeline + WHERE + commp_task_id = ANY ($1); + `, indIDs) + if err != nil { + return false, xerrors.Errorf("failed to get deal details from DB: %w", err) + } + + if storiface.FTPiece != 32 { + panic("storiface.FTPiece != 32") + } + + for _, task := range tasks { + if task.Url != nil { + goUrl, err := url.Parse(*task.Url) + if err != nil { + return false, xerrors.Errorf("parsing data URL: %w", err) + } + if goUrl.Scheme == "pieceref" { + refNum, err := strconv.ParseInt(goUrl.Opaque, 10, 64) + if err != nil { + return false, xerrors.Errorf("parsing piece reference number: %w", err) + } + + // get pieceID + var pieceID []struct { + PieceID storiface.PieceNumber `db:"piece_id"` + } + err = tx.Select(&pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, refNum) + if err != nil { + return false, xerrors.Errorf("getting pieceID: %w", err) + } + + var sLocation string + + err = tx.QueryRow(` + SELECT storage_id FROM sector_location + WHERE miner_id = 0 AND sector_num = $1 AND sector_filetype = 32`, pieceID[0].PieceID).Scan(&sLocation) + + if err != nil { + return false, xerrors.Errorf("failed to get storage location from DB: %w", err) + } + + task.StorageID = sLocation + } + } + } + + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return nil, err + } + + if !comm { + return nil, xerrors.Errorf("failed to commit the transaction") + } + + ls, err := c.sc.LocalStorage(ctx) + if err != nil { + return nil, xerrors.Errorf("getting local storage: %w", err) + } + + acceptables := map[harmonytask.TaskID]bool{} + + for _, t := range ids { + acceptables[t] = true + } + + for _, t := range tasks { + if _, ok := acceptables[t.TaskID]; !ok { + continue + } + + for _, l := range ls { + if string(l.ID) == t.StorageID { + return &t.TaskID, nil + } + } + } + + // If no local pieceRef was found then just return first TaskID + return &ids[0], nil +} + +func (c *PDPCommpTask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(c.max), + Name: "PDPCommP", + Cost: resources.Resources{ + Cpu: 1, + Ram: 1 << 30, + }, + MaxFailures: 3, + IAmBored: passcall.Every(3*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return c.schedule(context.Background(), taskFunc) + }), + } +} + +func (c *PDPCommpTask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var did string + err := tx.QueryRow(`SELECT id FROM pdp_pipeline + WHERE commp_task_id IS NULL + AND after_commp = FALSE + AND downloaded = TRUE`).Scan(&did) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, nil + } + return false, xerrors.Errorf("failed to query pdp_pipeline: %w", err) + } + if did == "" { + return false, xerrors.Errorf("no valid deal ID found for scheduling") + } + + _, err = tx.Exec(`UPDATE pdp_pipeline SET commp_task_id = $1 WHERE id = $2 AND commp_task_id IS NULL AND after_commp = FALSE AND downloaded = TRUE`, id, did) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_pipeline: %w", err) + } + + stop = false // we found a task to schedule, keep going + return true, nil + }) + + } + + return nil +} + +func (c *PDPCommpTask) Adder(taskFunc harmonytask.AddTaskFunc) {} + +var _ = harmonytask.Reg(&PDPCommpTask{}) +var _ harmonytask.TaskInterface = &PDPCommpTask{} diff --git a/tasks/pdp/task_delete_data_set.go b/tasks/pdp/task_delete_data_set.go new file mode 100644 index 000000000..e0ed526ab --- /dev/null +++ b/tasks/pdp/task_delete_data_set.go @@ -0,0 +1,199 @@ +package pdp + +import ( + "context" + "errors" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/pdp/contract" + "github.com/filecoin-project/curio/tasks/message" +) + +type PDPTaskDeleteDataSet struct { + db *harmonydb.DB + sender *message.SenderETH + ethClient *ethclient.Client + filClient PDPServiceNodeApi +} + +func NewPDPTaskDeleteDataSet(db *harmonydb.DB, sender *message.SenderETH, ethClient *ethclient.Client, filClient PDPServiceNodeApi) *PDPTaskDeleteDataSet { + return &PDPTaskDeleteDataSet{ + db: db, + sender: sender, + ethClient: ethClient, + filClient: filClient, + } +} + +func (p *PDPTaskDeleteDataSet) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + var pdeletes []struct { + SetID int64 `db:"set_id"` + ExtraData []byte `db:"extra_data"` + } + + err = p.db.Select(ctx, &pdeletes, `SELECT set_id, extra_data FROM pdp_data_set_delete WHERE task_id = $1 AND tx_hash IS NULL`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to get task details from DB: %w", err) + } + + if len(pdeletes) != 1 { + return false, xerrors.Errorf("incorrect rows for dataset delete found for taskID %d", taskID) + } + + pdelete := pdeletes[0] + + extraDataBytes := []byte{} + + dataSetID := new(big.Int).SetUint64(uint64(pdelete.SetID)) + + if pdelete.ExtraData != nil { + extraDataBytes = pdelete.ExtraData + } + + pdpContracts := contract.ContractAddresses() + pdpVerifierAddress := pdpContracts.PDPVerifier + + pdpVerifier, err := contract.NewPDPVerifier(pdpVerifierAddress, p.ethClient) + if err != nil { + return false, xerrors.Errorf("failed to instantiate PDPVerifier contract at %s: %w", pdpVerifierAddress.Hex(), err) + } + + callOpts := &bind.CallOpts{ + Context: ctx, + } + + // Get the sender address for this dataset + owner, _, err := pdpVerifier.GetDataSetStorageProvider(callOpts, dataSetID) + if err != nil { + return false, xerrors.Errorf("failed to get owner: %w", err) + } + + // Manually create the transaction without requiring a Signer + // Obtain the ABI of the PDPVerifier contract + abiData, err := contract.PDPVerifierMetaData.GetAbi() + if err != nil { + return false, xerrors.Errorf("getting PDPVerifier ABI: %w", err) + } + + // Pack the method call data + data, err := abiData.Pack("deleteDataSet", dataSetID, extraDataBytes) + if err != nil { + return false, xerrors.Errorf("packing data: %w", err) + } + + // Prepare the transaction (nonce will be set to 0, SenderETH will assign it) + tx := types.NewTransaction( + 0, + contract.ContractAddresses().PDPVerifier, + big.NewInt(0), + 0, + nil, + data, + ) + + // Send the transaction using SenderETH + reason := "pdp-delete-data-set" + txHash, err := p.sender.Send(ctx, owner, tx, reason) + if err != nil { + return false, xerrors.Errorf("sending transaction: %w", err) + } + + // Insert into message_waits_eth and pdp_data_set_delete + txHashLower := strings.ToLower(txHash.Hex()) + + comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE pdp_data_set_delete SET tx_hash = $1, task_id = NULL WHERE task_id = $2`, txHashLower, taskID) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_data_set_delete: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("incorrect number of rows updated for pdp_data_set_delete: %d", n) + } + + _, err = tx.Exec(`INSERT INTO message_waits_eth (signed_tx_hash, tx_status) VALUES ($1, $2)`, txHashLower, "pending") + if err != nil { + return false, xerrors.Errorf("failed to insert into message_waits_eth: %w", err) + } + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return false, xerrors.Errorf("failed to commit transaction: %w", err) + } + + if !comm { + return false, xerrors.Errorf("failed to commit transaction") + } + + return true, nil +} + +func (p *PDPTaskDeleteDataSet) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + return &ids[0], nil +} + +func (p *PDPTaskDeleteDataSet) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(50), + Name: "PDPDelDataSet", + Cost: resources.Resources{ + Cpu: 1, + Ram: 64 << 20, + }, + MaxFailures: 3, + IAmBored: passcall.Every(3*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return p.schedule(context.Background(), taskFunc) + }), + } +} + +func (p *PDPTaskDeleteDataSet) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var did string + err := tx.QueryRow(`SELECT id FROM pdp_data_set_delete WHERE task_id IS NULL AND tx_hash IS NULL LIMIT 1`).Scan(&did) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, nil + } + return false, xerrors.Errorf("failed to query pdp_data_set_delete: %w", err) + } + if did == "" { + return false, xerrors.Errorf("no valid id found for taskID") + } + + _, err = tx.Exec(`UPDATE pdp_data_set_delete SET task_id = $1 WHERE id = $2 AND tx_hash IS NULL`, id, did) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_data_set_delete: %w", err) + } + + stop = false // we found a task to schedule, keep going + return true, nil + }) + + } + + return nil +} + +func (p *PDPTaskDeleteDataSet) Adder(taskFunc harmonytask.AddTaskFunc) {} + +var _ harmonytask.TaskInterface = &PDPTaskDeleteDataSet{} +var _ = harmonytask.Reg(&PDPTaskDeleteDataSet{}) diff --git a/tasks/pdp/task_delete_piece.go b/tasks/pdp/task_delete_piece.go new file mode 100644 index 000000000..e4ed7805c --- /dev/null +++ b/tasks/pdp/task_delete_piece.go @@ -0,0 +1,208 @@ +package pdp + +import ( + "context" + "errors" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/pdp/contract" + "github.com/filecoin-project/curio/tasks/message" +) + +type PDPTaskDeletePiece struct { + db *harmonydb.DB + sender *message.SenderETH + ethClient *ethclient.Client +} + +func (p *PDPTaskDeletePiece) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var rdeletes []struct { + ID string `db:"id"` + SetID int64 `db:"set_id"` + Pieces []int64 `db:"pieces"` + ExtraData []byte `db:"extra_data"` + } + + err = p.db.Select(ctx, &rdeletes, `SELECT id, set_id, pieces, extra_data FROM pdp_piece_delete WHERE task_id = $1 AND tx_hash IS NULL`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to get task details from DB: %w", err) + } + + if len(rdeletes) != 1 { + return false, xerrors.Errorf("incorrect rows for delete piece found for taskID %d", taskID) + } + + rdelete := rdeletes[0] + + extraDataBytes := []byte{} + + if rdelete.ExtraData != nil { + extraDataBytes = rdelete.ExtraData + } + + dataSetID := new(big.Int).SetUint64(uint64(rdelete.SetID)) + + pdpContracts := contract.ContractAddresses() + pdpVerifierAddress := pdpContracts.PDPVerifier + + pdpVerifier, err := contract.NewPDPVerifier(pdpVerifierAddress, p.ethClient) + if err != nil { + return false, xerrors.Errorf("failed to instantiate PDPVerifier contract at %s: %w", pdpVerifierAddress.Hex(), err) + } + + callOpts := &bind.CallOpts{ + Context: ctx, + } + + // Get the sender address for this dataset + owner, _, err := pdpVerifier.GetDataSetStorageProvider(callOpts, dataSetID) + if err != nil { + return false, xerrors.Errorf("failed to get owner: %w", err) + } + + var pieces []*big.Int + for _, piece := range rdelete.Pieces { + pieces = append(pieces, new(big.Int).SetUint64(uint64(piece))) + } + + abiData, err := contract.PDPVerifierMetaData.GetAbi() + if err != nil { + return false, xerrors.Errorf("getting PDPVerifier ABI: %w", err) + } + + // Pack the method call data + data, err := abiData.Pack("schedulePieceDeletions", dataSetID, pieces, extraDataBytes) + if err != nil { + return false, xerrors.Errorf("packing data: %w", err) + } + + // Prepare the transaction (nonce will be set to 0, SenderETH will assign it) + tx := types.NewTransaction( + 0, + contract.ContractAddresses().PDPVerifier, + big.NewInt(0), + 0, + nil, + data, + ) + + // Send the transaction using SenderETH + reason := "pdp-remove-piece" + txHash, err := p.sender.Send(ctx, owner, tx, reason) + if err != nil { + return false, xerrors.Errorf("sending transaction: %w", err) + } + + // Insert into message_waits_eth and pdp_data_set_delete + txHashLower := strings.ToLower(txHash.Hex()) + + comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE pdp_piece_delete SET tx_hash = $1, task_id = NULL WHERE task_id = $2`, txHashLower, taskID) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_piece_delete: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("incorrect number of rows updated for pdp_piece_delete: %d", n) + } + + _, err = tx.Exec(`INSERT INTO message_waits_eth (signed_tx_hash, tx_status) VALUES ($1, $2)`, txHashLower, "pending") + if err != nil { + return false, xerrors.Errorf("failed to insert into message_waits_eth: %w", err) + } + return true, nil + + // TODO: INSERT IPNI and Index removal tasks + + }, harmonydb.OptionRetry()) + + if err != nil { + return false, xerrors.Errorf("failed to commit transaction: %w", err) + } + + if !comm { + return false, xerrors.Errorf("failed to commit transaction") + } + + return true, nil +} + +func (p *PDPTaskDeletePiece) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + return &ids[0], nil +} + +func (p *PDPTaskDeletePiece) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(50), + Name: "PDPDeletePiece", + Cost: resources.Resources{ + Cpu: 1, + Ram: 64 << 20, + }, + MaxFailures: 3, + IAmBored: passcall.Every(5*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return p.schedule(context.Background(), taskFunc) + }), + } +} + +func (p *PDPTaskDeletePiece) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var did string + err := tx.QueryRow(`SELECT id FROM pdp_piece_delete + WHERE task_id IS NULL + AND tx_hash IS NULL LIMIT 1`).Scan(&did) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, nil + } + return false, xerrors.Errorf("failed to query pdp_piece_delete: %w", err) + } + if did == "" { + return false, xerrors.Errorf("no valid deal ID found for scheduling") + } + + _, err = tx.Exec(`UPDATE pdp_piece_delete SET task_id = $1 WHERE id = $2 AND task_id IS NULL AND tx_hash IS NULL`, id, did) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_piece_delete: %w", err) + } + + stop = false // we found a task to schedule, keep going + return true, nil + }) + + } + + return nil +} + +func (p *PDPTaskDeletePiece) Adder(taskFunc harmonytask.AddTaskFunc) {} + +func NewPDPTaskDeletePiece(db *harmonydb.DB, sender *message.SenderETH, ethClient *ethclient.Client) *PDPTaskDeletePiece { + return &PDPTaskDeletePiece{ + db: db, + sender: sender, + ethClient: ethClient, + } +} + +var _ harmonytask.TaskInterface = &PDPTaskDeletePiece{} +var _ = harmonytask.Reg(&PDPTaskDeletePiece{}) diff --git a/tasks/pdp/task_init_pp.go b/tasks/pdp/task_init_pp.go index b5bce2010..c067f162e 100644 --- a/tasks/pdp/task_init_pp.go +++ b/tasks/pdp/task_init_pp.go @@ -2,12 +2,13 @@ package pdp import ( "context" - "database/sql" + "errors" "math/big" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" + "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" "github.com/filecoin-project/curio/harmony/harmonydb" @@ -48,31 +49,31 @@ func NewInitProvingPeriodTask(db *harmonydb.DB, ethClient *ethclient.Client, fil return nil } - // Now query the db for proof sets needing nextProvingPeriod inital call + // Now query the db for data sets needing nextProvingPeriod initial call var toCallInit []struct { - ProofSetID int64 `db:"id"` + DataSetID int64 `db:"id"` } err := db.Select(ctx, &toCallInit, ` SELECT id - FROM pdp_proof_sets + FROM pdp_data_set WHERE challenge_request_task_id IS NULL AND init_ready AND prove_at_epoch IS NULL `) - if err != nil && err != sql.ErrNoRows { - return xerrors.Errorf("failed to select proof sets needing nextProvingPeriod: %w", err) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return xerrors.Errorf("failed to select data sets needing nextProvingPeriod: %w", err) } for _, ps := range toCallInit { ipp.addFunc.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - // Update pdp_proof_sets to set challenge_request_task_id = id + // Update pdp_data_set to set challenge_request_task_id = id affected, err := tx.Exec(` - UPDATE pdp_proof_sets + UPDATE pdp_data_set SET challenge_request_task_id = $1 WHERE id = $2 AND challenge_request_task_id IS NULL - `, id, ps.ProofSetID) + `, id, ps.DataSetID) if err != nil { - return false, xerrors.Errorf("failed to update pdp_proof_sets: %w", err) + return false, xerrors.Errorf("failed to update pdp_data_set: %w", err) } if affected == 0 { // Someone else might have already scheduled the task @@ -92,49 +93,59 @@ func NewInitProvingPeriodTask(db *harmonydb.DB, ethClient *ethclient.Client, fil func (ipp *InitProvingPeriodTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { ctx := context.Background() - // Select the proof set where challenge_request_task_id = taskID - var proofSetID int64 + // Select the data set where challenge_request_task_id = taskID + var dataSetID int64 err = ipp.db.QueryRow(ctx, ` SELECT id - FROM pdp_proof_sets + FROM pdp_data_set WHERE challenge_request_task_id = $1 - `, taskID).Scan(&proofSetID) - if err == sql.ErrNoRows { - // No matching proof set, task is done (something weird happened, and e.g another task was spawned in place of this one) + `, taskID).Scan(&dataSetID) + if errors.Is(err, pgx.ErrNoRows) { + // No matching data set, task is done (something weird happened, and e.g. another task was spawned in place of this one) return true, nil } if err != nil { - return false, xerrors.Errorf("failed to query pdp_proof_sets: %w", err) + return false, xerrors.Errorf("failed to query pdp_data_set: %w", err) } - // Get the listener address for this proof set from the PDPVerifier contract + // Get the listener address for this data set from the PDPVerifier contract pdpVerifier, err := contract.NewPDPVerifier(contract.ContractAddresses().PDPVerifier, ipp.ethClient) if err != nil { return false, xerrors.Errorf("failed to instantiate PDPVerifier contract: %w", err) } - listenerAddr, err := pdpVerifier.GetProofSetListener(nil, big.NewInt(proofSetID)) + // Check if the data set has any leaves (pieces) before attempting to initialize proving period + leafCount, err := pdpVerifier.GetDataSetLeafCount(nil, big.NewInt(dataSetID)) if err != nil { - return false, xerrors.Errorf("failed to get listener address for proof set %d: %w", proofSetID, err) + return false, xerrors.Errorf("failed to get leaf count for data set %d: %w", dataSetID, err) + } + if leafCount.Cmp(big.NewInt(0)) == 0 { + // No leaves in the data set yet, skip initialization + // Return done=false to retry later (the task will be retried by the scheduler) + return false, xerrors.Errorf("no leaves in data set %d, skipping initialization", dataSetID) } - // Determine the next challenge window start by consulting the listener - provingSchedule, err := contract.NewIPDPProvingSchedule(listenerAddr, ipp.ethClient) + listenerAddr, err := pdpVerifier.GetDataSetListener(nil, big.NewInt(dataSetID)) if err != nil { - return false, xerrors.Errorf("failed to create proving schedule binding, check that listener has proving schedule methods: %w", err) + return false, xerrors.Errorf("failed to get listener address for data set %d: %w", dataSetID, err) } - // ChallengeWindow - challengeWindow, err := provingSchedule.ChallengeWindow(&bind.CallOpts{Context: ctx}) + // Get the proving schedule from the listener (handles view contract indirection) + provingSchedule, err := contract.GetProvingScheduleFromListener(listenerAddr, ipp.ethClient) if err != nil { - return false, xerrors.Errorf("failed to get challenge window: %w", err) + return false, xerrors.Errorf("failed to get proving schedule from listener: %w", err) } - init_prove_at, err := provingSchedule.InitChallengeWindowStart(&bind.CallOpts{Context: ctx}) + config, err := provingSchedule.GetPDPConfig(&bind.CallOpts{Context: ctx}) if err != nil { - return false, xerrors.Errorf("failed to get next challenge window start: %w", err) + return false, xerrors.Errorf("failed to get pdp config: %w", err) } + + // ChallengeWindow + challengeWindow := config.ChallengeWindow + + init_prove_at := config.InitChallengeWindowStart init_prove_at = init_prove_at.Add(init_prove_at, challengeWindow.Div(challengeWindow, big.NewInt(2))) // Give a buffer of 1/2 challenge window epochs so that we are still within challenge window // Instantiate the PDPVerifier contract pdpContracts := contract.ContractAddresses() @@ -146,7 +157,7 @@ func (ipp *InitProvingPeriodTask) Do(taskID harmonytask.TaskID, stillOwned func( return false, xerrors.Errorf("failed to get PDPVerifier ABI: %w", err) } - data, err := abiData.Pack("nextProvingPeriod", big.NewInt(proofSetID), init_prove_at, []byte{}) + data, err := abiData.Pack("nextProvingPeriod", big.NewInt(dataSetID), init_prove_at, []byte{}) if err != nil { return false, xerrors.Errorf("failed to pack data: %w", err) } @@ -166,7 +177,7 @@ func (ipp *InitProvingPeriodTask) Do(taskID harmonytask.TaskID, stillOwned func( return false, nil } - fromAddress, _, err := pdpVerifier.GetProofSetOwner(nil, big.NewInt(proofSetID)) + fromAddress, _, err := pdpVerifier.GetDataSetStorageProvider(nil, big.NewInt(dataSetID)) if err != nil { return false, xerrors.Errorf("failed to get default sender address: %w", err) } @@ -186,19 +197,19 @@ func (ipp *InitProvingPeriodTask) Do(taskID harmonytask.TaskID, stillOwned func( // Update the database in a transaction _, err = ipp.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { - // Update pdp_proof_sets + // Update pdp_data_set affected, err := tx.Exec(` - UPDATE pdp_proof_sets + UPDATE pdp_data_set SET challenge_request_msg_hash = $1, prev_challenge_request_epoch = $2, prove_at_epoch = $3 WHERE id = $4 - `, txHash.Hex(), ts.Height(), init_prove_at.Uint64(), proofSetID) + `, txHash.Hex(), ts.Height(), init_prove_at.Uint64(), data) if err != nil { - return false, xerrors.Errorf("failed to update pdp_proof_sets: %w", err) + return false, xerrors.Errorf("failed to update pdp_data_set: %w", err) } if affected == 0 { - return false, xerrors.Errorf("pdp_proof_sets update affected 0 rows") + return false, xerrors.Errorf("pdp_data_set update affected 0 rows") } // Insert into message_waits_eth diff --git a/tasks/pdp/task_next_pp.go b/tasks/pdp/task_next_pp.go index 14790e844..30c2d1954 100644 --- a/tasks/pdp/task_next_pp.go +++ b/tasks/pdp/task_next_pp.go @@ -2,11 +2,12 @@ package pdp import ( "context" - "database/sql" + "errors" "math/big" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" + "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" "github.com/filecoin-project/curio/harmony/harmonydb" @@ -47,31 +48,31 @@ func NewNextProvingPeriodTask(db *harmonydb.DB, ethClient *ethclient.Client, fil return nil } - // Now query the db for proof sets needing nextProvingPeriod + // Now query the db for data sets needing nextProvingPeriod var toCallNext []struct { - ProofSetID int64 `db:"id"` + DataSetID int64 `db:"id"` } err := db.Select(ctx, &toCallNext, ` SELECT id - FROM pdp_proof_sets + FROM pdp_data_set WHERE challenge_request_task_id IS NULL AND (prove_at_epoch + challenge_window) <= $1 `, apply.Height()) - if err != nil && err != sql.ErrNoRows { - return xerrors.Errorf("failed to select proof sets needing nextProvingPeriod: %w", err) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return xerrors.Errorf("failed to select data sets needing nextProvingPeriod: %w", err) } for _, ps := range toCallNext { n.addFunc.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - // Update pdp_proof_sets to set challenge_request_task_id = id + // Update pdp_data_set to set challenge_request_task_id = id affected, err := tx.Exec(` - UPDATE pdp_proof_sets + UPDATE pdp_data_set SET challenge_request_task_id = $1 WHERE id = $2 AND challenge_request_task_id IS NULL - `, id, ps.ProofSetID) + `, id, ps.DataSetID) if err != nil { - return false, xerrors.Errorf("failed to update pdp_proof_sets: %w", err) + return false, xerrors.Errorf("failed to update pdp_data_set: %w", err) } if affected == 0 { // Someone else might have already scheduled the task @@ -90,39 +91,40 @@ func NewNextProvingPeriodTask(db *harmonydb.DB, ethClient *ethclient.Client, fil func (n *NextProvingPeriodTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { ctx := context.Background() - // Select the proof set where challenge_request_task_id = taskID - var proofSetID int64 + // Select the data set where challenge_request_task_id = taskID + var dataSetID int64 err = n.db.QueryRow(ctx, ` SELECT id - FROM pdp_proof_sets + FROM pdp_data_set WHERE challenge_request_task_id = $1 AND prove_at_epoch IS NOT NULL - `, taskID).Scan(&proofSetID) - if err == sql.ErrNoRows { - // No matching proof set, task is done (something weird happened, and e.g another task was spawned in place of this one) + `, taskID).Scan(&dataSetID) + if errors.Is(err, pgx.ErrNoRows) { + // No matching data set, task is done (something weird happened, and e.g another task was spawned in place of this one) return true, nil } if err != nil { - return false, xerrors.Errorf("failed to query pdp_proof_sets: %w", err) + return false, xerrors.Errorf("failed to query pdp_data_set: %w", err) } - // Get the listener address for this proof set from the PDPVerifier contract + // Get the listener address for this data set from the PDPVerifier contract pdpVerifier, err := contract.NewPDPVerifier(contract.ContractAddresses().PDPVerifier, n.ethClient) if err != nil { return false, xerrors.Errorf("failed to instantiate PDPVerifier contract: %w", err) } - listenerAddr, err := pdpVerifier.GetProofSetListener(nil, big.NewInt(proofSetID)) + listenerAddr, err := pdpVerifier.GetDataSetListener(nil, big.NewInt(dataSetID)) if err != nil { - return false, xerrors.Errorf("failed to get listener address for proof set %d: %w", proofSetID, err) + return false, xerrors.Errorf("failed to get listener address for data set %d: %w", dataSetID, err) } - // Determine the next challenge window start by consulting the listener - provingSchedule, err := contract.NewIPDPProvingSchedule(listenerAddr, n.ethClient) + // Get the proving schedule from the listener (handles view contract indirection) + provingSchedule, err := contract.GetProvingScheduleFromListener(listenerAddr, n.ethClient) if err != nil { - return false, xerrors.Errorf("failed to create proving schedule binding, check that listener has proving schedule methods: %w", err) + return false, xerrors.Errorf("failed to get proving schedule from listener: %w", err) } - next_prove_at, err := provingSchedule.NextChallengeWindowStart(nil, big.NewInt(proofSetID)) + + next_prove_at, err := provingSchedule.NextPDPChallengeWindowStart(nil, big.NewInt(dataSetID)) if err != nil { return false, xerrors.Errorf("failed to get next challenge window start: %w", err) } @@ -137,7 +139,7 @@ func (n *NextProvingPeriodTask) Do(taskID harmonytask.TaskID, stillOwned func() return false, xerrors.Errorf("failed to get PDPVerifier ABI: %w", err) } - data, err := abiData.Pack("nextProvingPeriod", big.NewInt(proofSetID), next_prove_at, []byte{}) + data, err := abiData.Pack("nextProvingPeriod", big.NewInt(dataSetID), next_prove_at, []byte{}) if err != nil { return false, xerrors.Errorf("failed to pack data: %w", err) } @@ -157,7 +159,7 @@ func (n *NextProvingPeriodTask) Do(taskID harmonytask.TaskID, stillOwned func() return false, nil } - fromAddress, _, err := pdpVerifier.GetProofSetOwner(nil, big.NewInt(proofSetID)) + fromAddress, _, err := pdpVerifier.GetDataSetStorageProvider(nil, big.NewInt(dataSetID)) if err != nil { return false, xerrors.Errorf("failed to get default sender address: %w", err) } @@ -177,19 +179,19 @@ func (n *NextProvingPeriodTask) Do(taskID harmonytask.TaskID, stillOwned func() // Update the database in a transaction _, err = n.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { - // Update pdp_proof_sets + // Update pdp_data_set affected, err := tx.Exec(` - UPDATE pdp_proof_sets + UPDATE pdp_data_set SET challenge_request_msg_hash = $1, prev_challenge_request_epoch = $2, prove_at_epoch = $3 WHERE id = $4 - `, txHash.Hex(), ts.Height(), next_prove_at.Uint64(), proofSetID) + `, txHash.Hex(), ts.Height(), next_prove_at.Uint64(), dataSetID) if err != nil { - return false, xerrors.Errorf("failed to update pdp_proof_sets: %w", err) + return false, xerrors.Errorf("failed to update pdp_data_set: %w", err) } if affected == 0 { - return false, xerrors.Errorf("pdp_proof_sets update affected 0 rows") + return false, xerrors.Errorf("pdp_data_set update affected 0 rows") } // Insert into message_waits_eth diff --git a/tasks/pdp/task_prove.go b/tasks/pdp/task_prove.go index 2f0a626a1..8f3c4dd16 100644 --- a/tasks/pdp/task_prove.go +++ b/tasks/pdp/task_prove.go @@ -2,14 +2,12 @@ package pdp import ( "context" - "database/sql" "encoding/binary" "encoding/hex" "errors" "io" + "math" "math/big" - "math/bits" - "sort" "sync/atomic" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -17,14 +15,13 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ipfs/go-cid" - pool "github.com/libp2p/go-buffer-pool" "github.com/minio/sha256-simd" "github.com/samber/lo" + "github.com/yugabyte/pgx/v5" "golang.org/x/crypto/sha3" "golang.org/x/xerrors" - "github.com/filecoin-project/go-commp-utils/zerocomm" - commcid "github.com/filecoin-project/go-fil-commcid" + "github.com/filecoin-project/go-padreader" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/curio/harmony/harmonydb" @@ -32,8 +29,11 @@ import ( "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/lib/cachedreader" "github.com/filecoin-project/curio/lib/chainsched" + "github.com/filecoin-project/curio/lib/commcidv2" "github.com/filecoin-project/curio/lib/promise" "github.com/filecoin-project/curio/lib/proof" + "github.com/filecoin-project/curio/market/indexstore" + "github.com/filecoin-project/curio/market/mk20" "github.com/filecoin-project/curio/pdp/contract" "github.com/filecoin-project/curio/tasks/message" @@ -49,6 +49,7 @@ type ProveTask struct { sender *message.SenderETH cpr *cachedreader.CachedPieceReader fil ProveTaskChainApi + idx *indexstore.IndexStore head atomic.Pointer[chainTypes.TipSet] @@ -60,16 +61,17 @@ type ProveTaskChainApi interface { ChainHead(context.Context) (*chainTypes.TipSet, error) //perm:read } -func NewProveTask(chainSched *chainsched.CurioChainSched, db *harmonydb.DB, ethClient *ethclient.Client, fil ProveTaskChainApi, sender *message.SenderETH, cpr *cachedreader.CachedPieceReader) *ProveTask { +func NewProveTask(chainSched *chainsched.CurioChainSched, db *harmonydb.DB, ethClient *ethclient.Client, fil ProveTaskChainApi, sender *message.SenderETH, cpr *cachedreader.CachedPieceReader, idx *indexstore.IndexStore) *ProveTask { pt := &ProveTask{ db: db, ethClient: ethClient, sender: sender, cpr: cpr, fil: fil, + idx: idx, } - // ProveTasks are created on pdp_proof_sets entries where + // ProveTasks are created on pdp_data_set entries where // challenge_request_msg_hash is not null (=not yet landed) err := chainSched.AddHandler(func(ctx context.Context, revert, apply *chainTypes.TipSet) error { @@ -84,13 +86,13 @@ func NewProveTask(chainSched *chainsched.CurioChainSched, db *harmonydb.DB, ethC pt.addFunc.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { // Select proof sets ready for proving - var proofSets []struct { + var dataSets []struct { ID int64 `db:"id"` } - err := tx.Select(&proofSets, ` + err := tx.Select(&dataSets, ` SELECT p.id - FROM pdp_proof_sets p + FROM pdp_data_set p INNER JOIN message_waits_eth mw on mw.signed_tx_hash = p.challenge_request_msg_hash WHERE p.challenge_request_msg_hash IS NOT NULL AND mw.tx_success = TRUE AND p.prove_at_epoch < $1 LIMIT 2 @@ -99,37 +101,37 @@ func NewProveTask(chainSched *chainsched.CurioChainSched, db *harmonydb.DB, ethC return false, xerrors.Errorf("failed to select proof sets: %w", err) } - if len(proofSets) == 0 { + if len(dataSets) == 0 { // No proof sets to process return false, nil } // Determine if there might be more proof sets to process - more = len(proofSets) > 1 + more = len(dataSets) > 1 // Process the first proof set - todo := proofSets[0] + todo := dataSets[0] - // Insert a new task into pdp_prove_tasks + // Insert a new task into pdp_proving_tasks affected, err := tx.Exec(` - INSERT INTO pdp_prove_tasks (proofset, task_id) + INSERT INTO pdp_proving_tasks (data_set_id, task_id) VALUES ($1, $2) ON CONFLICT DO NOTHING `, todo.ID, id) if err != nil { - return false, xerrors.Errorf("failed to insert into pdp_prove_tasks: %w", err) + return false, xerrors.Errorf("failed to insert into pdp_proving_tasks: %w", err) } if affected == 0 { return false, nil } - // Update pdp_proof_sets to set next_challenge_possible = FALSE + // Update pdp_data_set to set next_challenge_possible = FALSE affected, err = tx.Exec(` - UPDATE pdp_proof_sets + UPDATE pdp_data_set SET challenge_request_msg_hash = NULL WHERE id = $1 AND challenge_request_msg_hash IS NOT NULL `, todo.ID) if err != nil { - return false, xerrors.Errorf("failed to update pdp_proof_sets: %w", err) + return false, xerrors.Errorf("failed to update pdp_data_set: %w", err) } if affected == 0 { more = false @@ -158,13 +160,13 @@ func (p *ProveTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done ctx := context.Background() // Retrieve proof set and challenge epoch for the task - var proofSetID int64 + var dataSetID int64 err = p.db.QueryRow(context.Background(), ` - SELECT proofset - FROM pdp_prove_tasks + SELECT data_set_id + FROM pdp_proving_tasks WHERE task_id = $1 - `, taskID).Scan(&proofSetID) + `, taskID).Scan(&dataSetID) if err != nil { return false, xerrors.Errorf("failed to get task details: %w", err) } @@ -182,7 +184,7 @@ func (p *ProveTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done } // Proof parameters - challengeEpoch, err := pdpVerifier.GetNextChallengeEpoch(callOpts, big.NewInt(proofSetID)) + challengeEpoch, err := pdpVerifier.GetNextChallengeEpoch(callOpts, big.NewInt(dataSetID)) if err != nil { return false, xerrors.Errorf("failed to get next challenge epoch: %w", err) } @@ -192,7 +194,7 @@ func (p *ProveTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done return false, xerrors.Errorf("failed to get chain randomness from beacon for pdp prove: %w", err) } - proofs, err := p.GenerateProofs(ctx, pdpVerifier, proofSetID, seed, contract.NumChallenges) + proofs, err := p.GenerateProofs(ctx, pdpVerifier, dataSetID, seed, contract.NumChallenges) if err != nil { return false, xerrors.Errorf("failed to generate proofs: %w", err) } @@ -202,7 +204,7 @@ func (p *ProveTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done return false, xerrors.Errorf("failed to get PDPVerifier ABI: %w", err) } - data, err := abiData.Pack("provePossession", big.NewInt(proofSetID), proofs) + data, err := abiData.Pack("provePossession", big.NewInt(dataSetID), proofs) if err != nil { return false, xerrors.Errorf("failed to pack data: %w", err) } @@ -225,21 +227,32 @@ func (p *ProveTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done proofStr += "] ] ]" - log.Infof("PDP Prove Task: proofSetID: %d, taskID: %d, proofs: %s", proofSetID, taskID, proofStr) + log.Infof("PDP Prove Task: dataSetID: %d, taskID: %d, proofs: %s", dataSetID, taskID, proofStr) } */ // If gas used is 0 fee is maximized gasFee := big.NewInt(0) - proofFee, err := pdpVerifier.CalculateProofFee(callOpts, big.NewInt(proofSetID), gasFee) + pdpVerifierRaw := contract.PDPVerifierRaw{Contract: pdpVerifier} + + calcProofFeeResult := make([]any, 1) + err = pdpVerifierRaw.Call(callOpts, &calcProofFeeResult, "calculateProofFee", big.NewInt(dataSetID), gasFee) if err != nil { return false, xerrors.Errorf("failed to calculate proof fee: %w", err) } - // Add 2x buffer for certainty - proofFee = new(big.Int).Mul(proofFee, big.NewInt(3)) + if len(calcProofFeeResult) == 0 { + return false, xerrors.Errorf("failed to calculate proof fee: wrong number of return values") + } + if calcProofFeeResult[0] == nil { + return false, xerrors.Errorf("failed to calculate proof fee: nil return value") + } + if calcProofFeeResult[0].(*big.Int) == nil { + return false, xerrors.Errorf("failed to calculate proof fee: nil *big.Int return value") + } + proofFee := calcProofFeeResult[0].(*big.Int) - // Get the sender address for this proofset - owner, _, err := pdpVerifier.GetProofSetOwner(callOpts, big.NewInt(proofSetID)) + // Get the sender address for this dataset + owner, _, err := pdpVerifier.GetDataSetStorageProvider(callOpts, big.NewInt(dataSetID)) if err != nil { return false, xerrors.Errorf("failed to get owner: %w", err) } @@ -278,7 +291,7 @@ func (p *ProveTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done } log.Infow("PDP Prove Task", - "proofSetID", proofSetID, + "dataSetID", dataSetID, "taskID", taskID, "proofs", proofLogs, "data", hex.EncodeToString(data), @@ -299,46 +312,40 @@ func (p *ProveTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done return false, xerrors.Errorf("failed to send transaction: %w", err) } - // Remove the roots previously scheduled for deletion - err = p.cleanupDeletedRoots(ctx, proofSetID, pdpVerifier) - if err != nil { - return false, xerrors.Errorf("failed to cleanup deleted roots: %w", err) - } - - log.Infow("PDP Prove Task: transaction sent", "txHash", txHash, "proofSetID", proofSetID, "taskID", taskID) + log.Infow("PDP Prove Task: transaction sent", "txHash", txHash, "dataSetID", dataSetID, "taskID", taskID) // Task completed successfully return true, nil } -func (p *ProveTask) GenerateProofs(ctx context.Context, pdpService *contract.PDPVerifier, proofSetID int64, seed abi.Randomness, numChallenges int) ([]contract.PDPVerifierProof, error) { - proofs := make([]contract.PDPVerifierProof, numChallenges) +func (p *ProveTask) GenerateProofs(ctx context.Context, pdpService *contract.PDPVerifier, dataSetID int64, seed abi.Randomness, numChallenges int) ([]contract.IPDPTypesProof, error) { + proofs := make([]contract.IPDPTypesProof, numChallenges) callOpts := &bind.CallOpts{ Context: ctx, } - totalLeafCount, err := pdpService.GetChallengeRange(callOpts, big.NewInt(proofSetID)) + totalLeafCount, err := pdpService.GetChallengeRange(callOpts, big.NewInt(dataSetID)) if err != nil { return nil, xerrors.Errorf("failed to get proof set leaf count: %w", err) } totalLeaves := totalLeafCount.Uint64() challenges := lo.Times(numChallenges, func(i int) int64 { - return generateChallengeIndex(seed, proofSetID, i, totalLeaves) + return generateChallengeIndex(seed, dataSetID, i, totalLeaves) }) - rootId, err := pdpService.FindRootIds(callOpts, big.NewInt(proofSetID), lo.Map(challenges, func(i int64, _ int) *big.Int { return big.NewInt(i) })) + pieceId, err := pdpService.FindPieceIds(callOpts, big.NewInt(dataSetID), lo.Map(challenges, func(i int64, _ int) *big.Int { return big.NewInt(i) })) if err != nil { return nil, xerrors.Errorf("failed to find root IDs: %w", err) } for i := 0; i < numChallenges; i++ { - root := rootId[i] + piece := pieceId[i] - proof, err := p.proveRoot(ctx, proofSetID, root.RootId.Int64(), root.Offset.Int64()) + proof, err := p.proveRoot(ctx, dataSetID, piece.PieceId.Int64(), piece.Offset.Int64()) if err != nil { - return nil, xerrors.Errorf("failed to prove root %d (%d, %d, %d): %w", i, proofSetID, root.RootId.Int64(), root.Offset.Int64(), err) + return nil, xerrors.Errorf("failed to prove root %d (%d, %d, %d): %w", i, dataSetID, piece.PieceId.Int64(), piece.Offset.Int64(), err) } proofs[i] = proof @@ -347,7 +354,7 @@ func (p *ProveTask) GenerateProofs(ctx context.Context, pdpService *contract.PDP return proofs, nil } -func generateChallengeIndex(seed abi.Randomness, proofSetID int64, proofIndex int, totalLeaves uint64) int64 { +func generateChallengeIndex(seed abi.Randomness, dataSetID int64, proofIndex int, totalLeaves uint64) int64 { // Create a buffer to hold the concatenated data (96 bytes: 32 bytes * 3) data := make([]byte, 0, 96) @@ -355,10 +362,10 @@ func generateChallengeIndex(seed abi.Randomness, proofSetID int64, proofIndex in data = append(data, seed...) - // Convert proofSetID to 32-byte big-endian representation - proofSetIDBigInt := big.NewInt(proofSetID) - proofSetIDBytes := padTo32Bytes(proofSetIDBigInt.Bytes()) - data = append(data, proofSetIDBytes...) + // Convert dataSetID to 32-byte big-endian representation + dataSetIDBigInt := big.NewInt(dataSetID) + dataSetIDBytes := padTo32Bytes(dataSetIDBigInt.Bytes()) + data = append(data, dataSetIDBytes...) // Convert proofIndex to 8-byte big-endian representation proofIndexBytes := make([]byte, 8) @@ -380,7 +387,7 @@ func generateChallengeIndex(seed abi.Randomness, proofSetID int64, proofIndex in // Log for debugging log.Debugw("generateChallengeIndex", "seed", seed, - "proofSetID", proofSetID, + "dataSetID", dataSetID, "proofIndex", proofIndex, "totalLeaves", totalLeaves, "data", hex.EncodeToString(data), @@ -400,261 +407,175 @@ func padTo32Bytes(b []byte) []byte { return padded } -func (p *ProveTask) genSubrootMemtree(ctx context.Context, subrootCid string, subrootSize abi.PaddedPieceSize) ([]byte, error) { - subrootCidObj, err := cid.Parse(subrootCid) - if err != nil { - return nil, xerrors.Errorf("failed to parse subroot CID: %w", err) - } - - if subrootSize > proof.MaxMemtreeSize { - return nil, xerrors.Errorf("subroot size exceeds maximum: %d", subrootSize) - } - - subrootReader, unssize, err := p.cpr.GetSharedPieceReader(ctx, subrootCidObj) - if err != nil { - return nil, xerrors.Errorf("failed to get subroot reader: %w", err) - } - - var r io.Reader = subrootReader - - if unssize.Padded() > subrootSize { - return nil, xerrors.Errorf("subroot size mismatch: %d > %d", unssize.Padded(), subrootSize) - } else if unssize.Padded() < subrootSize { - // pad with zeros - r = io.MultiReader(r, nullreader.NewNullReader(abi.UnpaddedPieceSize(subrootSize-unssize.Padded()))) - } - - defer func() { - _ = subrootReader.Close() - }() - - return proof.BuildSha254Memtree(r, subrootSize.Unpadded()) -} - -func (p *ProveTask) proveRoot(ctx context.Context, proofSetID int64, rootId int64, challengedLeaf int64) (contract.PDPVerifierProof, error) { - const arity = 2 +func (p *ProveTask) proveRoot(ctx context.Context, dataSetID int64, rootId int64, challengedLeaf int64) (contract.IPDPTypesProof, error) { + //const arity = 2 rootChallengeOffset := challengedLeaf * LeafSize - // Retrieve the root and subroot - type subrootMeta struct { - Root string `db:"root"` - Subroot string `db:"subroot"` - SubrootOffset int64 `db:"subroot_offset"` // padded offset - SubrootSize int64 `db:"subroot_size"` // padded piece size - } - - var subroots []subrootMeta + var pieceCid string - err := p.db.Select(context.Background(), &subroots, ` - SELECT root, subroot, subroot_offset, subroot_size - FROM pdp_proofset_roots - WHERE proofset = $1 AND root_id = $2 - ORDER BY subroot_offset ASC - `, proofSetID, rootId) + err := p.db.QueryRow(context.Background(), `SELECT piece_cid_v2 FROM pdp_dataset_piece WHERE data_set_id = $1 AND root_id = $2`, dataSetID, rootId).Scan(&pieceCid) if err != nil { - return contract.PDPVerifierProof{}, xerrors.Errorf("failed to get root and subroot: %w", err) + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to get root and subroot: %w", err) } - // find first subroot with subroot_offset >= rootChallengeOffset - challSubRoot, challSubrootIdx, ok := lo.FindLastIndexOf(subroots, func(subroot subrootMeta) bool { - return subroot.SubrootOffset < rootChallengeOffset - }) - if !ok { - return contract.PDPVerifierProof{}, xerrors.New("no subroot found") - } - - // build subroot memtree - memtree, err := p.genSubrootMemtree(ctx, challSubRoot.Subroot, abi.PaddedPieceSize(challSubRoot.SubrootSize)) + pcid, err := cid.Parse(pieceCid) if err != nil { - return contract.PDPVerifierProof{}, xerrors.Errorf("failed to generate subroot memtree: %w", err) + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to parse piece CID: %w", err) } - subrootChallengedLeaf := challengedLeaf - (challSubRoot.SubrootOffset / LeafSize) - log.Debugw("subrootChallengedLeaf", "subrootChallengedLeaf", subrootChallengedLeaf, "challengedLeaf", challengedLeaf, "subrootOffsetLs", challSubRoot.SubrootOffset/LeafSize) - - /* - type RawMerkleProof struct { - Leaf [32]byte - Proof [][32]byte - Root [32]byte - } - */ - subrootProof, err := proof.MemtreeProof(memtree, subrootChallengedLeaf) - pool.Put(memtree) + pi, err := mk20.GetPieceInfo(pcid) if err != nil { - return contract.PDPVerifierProof{}, xerrors.Errorf("failed to generate subroot proof: %w", err) - } - log.Debugw("subrootProof", "subrootProof", subrootProof) - - // build partial top-tree - type treeElem struct { - Level int // 1 == leaf, NODE_SIZE - Hash [LeafSize]byte - } - type elemIndex struct { - Level int - ElemOffset int64 // offset in terms of nodes at the current level + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to get piece info: %w", err) } - partialTree := map[elemIndex]treeElem{} - var subrootsSize abi.PaddedPieceSize - - // 1. prefill the partial tree - for _, subroot := range subroots { - subrootsSize += abi.PaddedPieceSize(subroot.SubrootSize) + var out contract.IPDPTypesProof + var rootDigest [32]byte - unsCid, err := cid.Parse(subroot.Subroot) + // If piece is less than 100 MiB, let's generate proof directly without using cache + if pi.RawSize < MinSizeForCache { + // Get original file reader + reader, _, err := p.cpr.GetSharedPieceReader(ctx, pcid) if err != nil { - return contract.PDPVerifierProof{}, xerrors.Errorf("failed to parse subroot CID: %w", err) + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to get piece reader: %w", err) } + defer func() { + _ = reader.Close() + }() - commp, err := commcid.CIDToPieceCommitmentV1(unsCid) + // Build Merkle tree from padded input + memTree, err := proof.BuildSha254Memtree(reader, pi.Size.Unpadded()) if err != nil { - return contract.PDPVerifierProof{}, xerrors.Errorf("failed to convert CID to piece commitment: %w", err) + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to build memtree: %w", err) } + log.Debugw("proveRoot", "rootChallengeOffset", rootChallengeOffset, "challengedLeaf", challengedLeaf) - var comm [LeafSize]byte - copy(comm[:], commp) + mProof, err := proof.MemtreeProof(memTree, challengedLeaf) + if err != nil { + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to generate memtree proof: %w", err) + } - level := proof.NodeLevel(subroot.SubrootSize/LeafSize, arity) - offset := (subroot.SubrootOffset / LeafSize) >> uint(level-1) - partialTree[elemIndex{Level: level, ElemOffset: offset}] = treeElem{ - Level: level, - Hash: comm, + out = contract.IPDPTypesProof{ + Leaf: mProof.Leaf, + Proof: mProof.Proof, } - } - rootSize := nextPowerOfTwo(subrootsSize) - rootLevel := proof.NodeLevel(int64(rootSize/LeafSize), arity) + rootDigest = mProof.Root + } else { + //Calculate layer L such that 127 * 2^L >= targetReadSize + //→ 2^L >= targetReadSize / 32 + ratio := float64(4161536) / 32 + layerIdx := int(math.Ceil(math.Log2(ratio))) - // 2. build the partial tree - // we do the build from the right side of the tree - elements are sorted by size, so only elements on the right side can have missing siblings + leavesPerNode := int64(1) << layerIdx + snapshotNodeIndex := challengedLeaf >> layerIdx - isRight := func(offset int64) bool { - return offset&1 == 1 - } + has, node, err := p.idx.GetPDPNode(ctx, pcid, snapshotNodeIndex) + if err != nil { + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to get node: %w", err) + } - for i := len(subroots) - 1; i >= 0; i-- { - subroot := subroots[i] - level := proof.NodeLevel(subroot.SubrootSize/LeafSize, arity) - offset := (subroot.SubrootOffset / LeafSize) >> uint(level-1) - firstSubroot := i == 0 + if !has { + // TODO: Trigger a Layer save task here and figure out if we should proceed or not + // TODO: Proceeding from here can cause memory issue for big pieces, we will need to generate proof using some other lib + panic("implement me") + } - curElem := partialTree[elemIndex{Level: level, ElemOffset: offset}] + log.Debugw("proveRoot", "rootChallengeOffset", rootChallengeOffset, "challengedLeaf", challengedLeaf, "layerIdx", layerIdx, "snapshotNodeIndex", snapshotNodeIndex, "node", node) - log.Debugw("processing partialtree subroot", "curElem", curElem, "level", level, "offset", offset, "subroot", subroot.SubrootOffset, "subrootSz", subroot.SubrootSize) + if node.Layer != layerIdx { + return contract.IPDPTypesProof{}, xerrors.Errorf("node layer mismatch: %d != %d", node.Layer, layerIdx) + } - for !isRight(offset) { - // find the rightSibling - siblingIndex := elemIndex{Level: level, ElemOffset: offset + 1} - rightSibling, ok := partialTree[siblingIndex] - if !ok { - // if we're processing the first subroot branch, AND we've ran out of right siblings, we're done - if firstSubroot { - break - } + startLeaf := snapshotNodeIndex << layerIdx + // Convert tree-based leaf range to file-based offset/length + offset := int64(abi.PaddedPieceSize(startLeaf * 32).Unpadded()) + length := int64(abi.PaddedPieceSize(leavesPerNode * 32).Unpadded()) - // create a zero rightSibling - rightSibling = treeElem{ - Level: level, - Hash: zerocomm.PieceComms[level-zerocomm.Skip-1], - } - log.Debugw("rightSibling zero", "rightSibling", rightSibling, "siblingIndex", siblingIndex, "level", level, "offset", offset) - partialTree[siblingIndex] = rightSibling - } + // Compute padded size to build Merkle tree + subrootSize := padreader.PaddedSize(uint64(length)).Padded() - // compute the parent - parent := proof.ComputeBinShaParent(curElem.Hash, rightSibling.Hash) - parentLevel := level + 1 - parentOffset := offset / arity + // Get original file reader + reader, reportedSize, err := p.cpr.GetSharedPieceReader(ctx, pcid) + if err != nil { + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to get reader: %w", err) + } + defer func() { + _ = reader.Close() + }() - partialTree[elemIndex{Level: parentLevel, ElemOffset: parentOffset}] = treeElem{ - Level: parentLevel, - Hash: parent, - } + fileRemaining := int64(reportedSize) - offset - // move to the parent - level = parentLevel - offset = parentOffset - curElem = partialTree[elemIndex{Level: level, ElemOffset: offset}] + var data io.Reader + if fileRemaining < length { + data = io.MultiReader(reader, nullreader.NewNullReader(abi.UnpaddedPieceSize(int64(subrootSize.Unpadded())-fileRemaining))) + } else { + data = reader } - } - { - var partialTreeList []elemIndex - for k := range partialTree { - partialTreeList = append(partialTreeList, k) + memtree, err := proof.BuildSha254Memtree(data, subrootSize.Unpadded()) + if err != nil { + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to build memtree: %w", err) } - sort.Slice(partialTreeList, func(i, j int) bool { - if partialTreeList[i].Level != partialTreeList[j].Level { - return partialTreeList[i].Level < partialTreeList[j].Level - } - return partialTreeList[i].ElemOffset < partialTreeList[j].ElemOffset - }) - - } - challLevel := proof.NodeLevel(challSubRoot.SubrootSize/LeafSize, arity) - challOffset := (challSubRoot.SubrootOffset / LeafSize) >> uint(challLevel-1) + // Get challenge leaf in subTree + subTreeChallenge := challengedLeaf - startLeaf - log.Debugw("challSubRoot", "challSubRoot", challSubrootIdx, "challLevel", challLevel, "challOffset", challOffset) - - challSubtreeLeaf := partialTree[elemIndex{Level: challLevel, ElemOffset: challOffset}] - if challSubtreeLeaf.Hash != subrootProof.Root { - return contract.PDPVerifierProof{}, xerrors.Errorf("subtree root doesn't match partial tree leaf, %x != %x", challSubtreeLeaf.Hash, subrootProof.Root) - } - - var out contract.PDPVerifierProof - copy(out.Leaf[:], subrootProof.Leaf[:]) - out.Proof = append(out.Proof, subrootProof.Proof...) + subTreeProof, err := proof.MemtreeProof(memtree, subTreeChallenge) + if err != nil { + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to generate sub tree proof: %w", err) + } + log.Debugw("subTreeProof", "subrootProof", subTreeProof) - currentLevel := challLevel - currentOffset := challOffset + // Verify root of proof + if subTreeProof.Root != node.Hash { + return contract.IPDPTypesProof{}, xerrors.Errorf("subroot root mismatch: %x != %x", subTreeProof.Root, node.Hash) + } - for currentLevel < rootLevel { - siblingOffset := currentOffset ^ 1 + // Fetch full cached layer from DB + layerNodes, err := p.idx.GetPDPLayer(ctx, pcid) + if err != nil { + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to get layer nodes: %w", err) + } - // Retrieve sibling hash from partialTree or use zero hash - siblingIndex := elemIndex{Level: currentLevel, ElemOffset: siblingOffset} - index := elemIndex{Level: currentLevel, ElemOffset: currentOffset} - siblingElem, ok := partialTree[siblingIndex] - if !ok { - return contract.PDPVerifierProof{}, xerrors.Errorf("missing sibling at level %d, offset %d", currentLevel, siblingOffset) + // Arrange snapshot layer into a byte array + var layerBytes []byte + for _, n := range layerNodes { + layerBytes = append(layerBytes, n.Hash[:]...) } - elem, ok := partialTree[index] - if !ok { - return contract.PDPVerifierProof{}, xerrors.Errorf("missing element at level %d, offset %d", currentLevel, currentOffset) + + // Create subTree from snapshot to commP (root) + mtree, err := proof.BuildSha254MemtreeFromSnapshot(layerBytes) + if err != nil { + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to build memtree from snapshot: %w", err) } - if currentOffset < siblingOffset { // left - log.Debugw("Proof", "position", index, "left-c", hex.EncodeToString(elem.Hash[:]), "right-s", hex.EncodeToString(siblingElem.Hash[:]), "out", hex.EncodeToString(shabytes(append(elem.Hash[:], siblingElem.Hash[:]...))[:])) - } else { // right - log.Debugw("Proof", "position", index, "left-s", hex.EncodeToString(siblingElem.Hash[:]), "right-c", hex.EncodeToString(elem.Hash[:]), "out", hex.EncodeToString(shabytes(append(siblingElem.Hash[:], elem.Hash[:]...))[:])) + + // Generate merkle proof from snapShot node to commP + proofs, err := proof.MemtreeProof(mtree, snapshotNodeIndex) + if err != nil { + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to generate memtree proof: %w", err) } - // Append the sibling's hash to the proof - out.Proof = append(out.Proof, siblingElem.Hash) + com, err := commcidv2.CommPFromPCidV2(pcid) + if err != nil { + return contract.IPDPTypesProof{}, xerrors.Errorf("failed to get piece commitment: %w", err) + } - // Move up to the parent node - currentOffset = currentOffset / arity - currentLevel++ - } + // Verify proof with original root + if [32]byte(com.Digest()) != proofs.Root { + return contract.IPDPTypesProof{}, xerrors.Errorf("root digest mismatch: %x != %x", com.Digest(), proofs.Root) + } - log.Debugw("proof complete", "proof", out) + out = contract.IPDPTypesProof{ + Leaf: subTreeProof.Leaf, + Proof: append(subTreeProof.Proof, proofs.Proof...), + } - rootCid, err := cid.Parse(subroots[0].Root) - if err != nil { - return contract.PDPVerifierProof{}, xerrors.Errorf("failed to parse root CID: %w", err) - } - commRoot, err := commcid.CIDToPieceCommitmentV1(rootCid) - if err != nil { - return contract.PDPVerifierProof{}, xerrors.Errorf("failed to convert CID to piece commitment: %w", err) + rootDigest = proofs.Root } - var cr [LeafSize]byte - copy(cr[:], commRoot) - if !Verify(out, cr, uint64(challengedLeaf)) { - return contract.PDPVerifierProof{}, xerrors.Errorf("proof verification failed") + if !Verify(out, rootDigest, uint64(challengedLeaf)) { + return contract.IPDPTypesProof{}, xerrors.Errorf("proof verification failed") } // Return the completed proof @@ -665,7 +586,7 @@ func (p *ProveTask) getSenderAddress(ctx context.Context, match common.Address) var addressStr string err := p.db.QueryRow(ctx, `SELECT address FROM eth_keys WHERE role = 'pdp' AND address = $1 LIMIT 1`, match.Hex()).Scan(&addressStr) if err != nil { - if errors.Is(err, sql.ErrNoRows) { + if errors.Is(err, pgx.ErrNoRows) { return common.Address{}, errors.New("no sender address with role 'pdp' found") } return common.Address{}, err @@ -674,62 +595,6 @@ func (p *ProveTask) getSenderAddress(ctx context.Context, match common.Address) return address, nil } -func (p *ProveTask) cleanupDeletedRoots(ctx context.Context, proofSetID int64, pdpVerifier *contract.PDPVerifier) error { - removals, err := pdpVerifier.GetScheduledRemovals(nil, big.NewInt(proofSetID)) - if err != nil { - return xerrors.Errorf("failed to get scheduled removals: %w", err) - } - - // Execute cleanup in a transaction - ok, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { - for _, removeID := range removals { - log.Debugw("cleanupDeletedRoots", "removeID", removeID) - // Get the pdp_pieceref ID for the root before deleting - var pdpPieceRefID int64 - err := tx.QueryRow(` - SELECT pdp_pieceref - FROM pdp_proofset_roots - WHERE proofset = $1 AND root_id = $2 - `, proofSetID, removeID.Int64()).Scan(&pdpPieceRefID) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - // Root already deleted, skip - continue - } - return false, xerrors.Errorf("failed to get piece ref for root %d: %w", removeID, err) - } - - // Delete the parked piece ref, this will cascade to the pdp piece ref too - _, err = tx.Exec(` - DELETE FROM parked_piece_refs - WHERE ref_id = $1 - `, pdpPieceRefID) - if err != nil { - return false, xerrors.Errorf("failed to delete parked piece ref %d: %w", pdpPieceRefID, err) - } - - // Delete the root entry - _, err = tx.Exec(` - DELETE FROM pdp_proofset_roots - WHERE proofset = $1 AND root_id = $2 - `, proofSetID, removeID) - if err != nil { - return false, xerrors.Errorf("failed to delete root %d: %w", removeID, err) - } - } - - return true, nil - }, harmonydb.OptionRetry()) - if err != nil { - return xerrors.Errorf("failed to cleanup deleted roots: %w", err) - } - if !ok { - return xerrors.Errorf("database delete not committed") - } - - return nil -} - func (p *ProveTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { if len(ids) == 0 { return nil, nil @@ -754,12 +619,7 @@ func (p *ProveTask) Adder(taskFunc harmonytask.AddTaskFunc) { p.addFunc.Set(taskFunc) } -func nextPowerOfTwo(n abi.PaddedPieceSize) abi.PaddedPieceSize { - lz := bits.LeadingZeros64(uint64(n - 1)) - return 1 << (64 - lz) -} - -func Verify(proof contract.PDPVerifierProof, root [32]byte, position uint64) bool { +func Verify(proof contract.IPDPTypesProof, root [32]byte, position uint64) bool { computedHash := proof.Leaf for i := 0; i < len(proof.Proof); i++ { @@ -789,7 +649,5 @@ func shabytes(in []byte) []byte { return out[:] } -var ( - _ = harmonytask.Reg(&ProveTask{}) - _ harmonytask.TaskInterface = &ProveTask{} -) +var _ = harmonytask.Reg(&ProveTask{}) +var _ harmonytask.TaskInterface = &ProveTask{} diff --git a/tasks/pdp/task_save_cache.go b/tasks/pdp/task_save_cache.go new file mode 100644 index 000000000..df33c609d --- /dev/null +++ b/tasks/pdp/task_save_cache.go @@ -0,0 +1,655 @@ +package pdp + +import ( + "context" + "errors" + "hash" + "io" + "math" + "math/bits" + "sync" + "time" + + "github.com/ipfs/go-cid" + sha256simd "github.com/minio/sha256-simd" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-padreader" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/cachedreader" + "github.com/filecoin-project/curio/lib/commcidv2" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/market/indexstore" + "github.com/filecoin-project/curio/market/mk20" +) + +const MinSizeForCache = uint64(100 * 1024 * 1024) +const CacheReadSize = int64(4 * 1024 * 1024) + +type TaskPDPSaveCache struct { + db *harmonydb.DB + cpr *cachedreader.CachedPieceReader + idx *indexstore.IndexStore +} + +func NewTaskPDPSaveCache(db *harmonydb.DB, cpr *cachedreader.CachedPieceReader, idx *indexstore.IndexStore) *TaskPDPSaveCache { + return &TaskPDPSaveCache{ + db: db, + cpr: cpr, + idx: idx, + } +} + +func (t *TaskPDPSaveCache) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + var saveCaches []struct { + ID string `db:"id"` + PieceCid string `db:"piece_cid_v2"` + DataSetID int64 `db:"data_set_id"` + PieceRef string `db:"piece_ref"` + } + + err = t.db.Select(ctx, &saveCaches, `SELECT id, piece_cid_v2, data_set_id, piece_ref FROM pdp_pipeline WHERE save_cache_task_id = $1 AND after_save_cache = FALSE`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to select rows from pipeline: %w", err) + } + + if len(saveCaches) == 0 { + return false, xerrors.Errorf("no saveCaches found for taskID %d", taskID) + } + + if len(saveCaches) > 1 { + return false, xerrors.Errorf("multiple saveCaches found for taskID %d", taskID) + } + + sc := saveCaches[0] + + pcid, err := cid.Parse(sc.PieceCid) + if err != nil { + return false, xerrors.Errorf("failed to parse piece cid: %w", err) + } + + pi, err := mk20.GetPieceInfo(pcid) + if err != nil { + return false, xerrors.Errorf("failed to get piece info: %w", err) + } + + // Let's build the merkle Tree again (commP) and save a middle layer for fast proving + // for pieces larger than 100 MiB + if pi.RawSize > MinSizeForCache { + has, err := t.idx.HasPDPLayer(ctx, pcid) + if err != nil { + return false, xerrors.Errorf("failed to check if piece has PDP layer: %w", err) + } + + if !has { + cp := NewCommPWithSize(pi.RawSize) + reader, _, err := t.cpr.GetSharedPieceReader(ctx, pcid) + if err != nil { + return false, xerrors.Errorf("failed to get shared piece reader: %w", err) + } + defer func() { + _ = reader.Close() + }() + + n, err := io.CopyBuffer(cp, reader, make([]byte, 4<<20)) + if err != nil { + return false, xerrors.Errorf("failed to copy piece data to commP: %w", err) + } + + digest, _, lidx, snap, err := cp.DigestWithSnapShot() + if err != nil { + return false, xerrors.Errorf("failed to get piece digest: %w", err) + } + + com, err := commcidv2.NewSha2CommP(uint64(n), digest) + if err != nil { + return false, xerrors.Errorf("failed to create commP: %w", err) + } + + if !com.PCidV2().Equals(pcid) { + return false, xerrors.Errorf("commP cid does not match piece cid: %s != %s", com.PCidV2().String(), pcid.String()) + } + + leafs := make([]indexstore.NodeDigest, len(snap)) + for i, s := range snap { + leafs[i] = indexstore.NodeDigest{ + Layer: lidx, + Hash: s.Hash, + Index: int64(i), + } + } + + err = t.idx.AddPDPLayer(ctx, pcid, leafs) + if err != nil { + return false, xerrors.Errorf("failed to add PDP layer cache: %w", err) + } + } + } + + n, err := t.db.Exec(ctx, `UPDATE pdp_pipeline SET after_save_cache = TRUE, save_cache_task_id = NULL, indexing_created_at = NOW() WHERE save_cache_task_id = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_pipeline: %w", err) + } + + if n != 1 { + return false, xerrors.Errorf("failed to update pdp_pipeline: expected 1 row but %d rows updated", n) + } + + return true, nil +} + +func (t *TaskPDPSaveCache) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + return &ids[0], nil +} + +func (t *TaskPDPSaveCache) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(50), + Name: "PDPSaveCache", + Cost: resources.Resources{ + Cpu: 1, + Ram: 64 << 20, + }, + MaxFailures: 3, + IAmBored: passcall.Every(2*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return t.schedule(context.Background(), taskFunc) + }), + } +} + +func (t *TaskPDPSaveCache) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var did string + err := tx.QueryRow(`SELECT id FROM pdp_pipeline + WHERE save_cache_task_id IS NULL + AND after_save_cache = FALSE + AND after_add_piece_msg = TRUE`).Scan(&did) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, nil + } + return false, xerrors.Errorf("failed to query pdp_pipeline: %w", err) + } + if did == "" { + return false, xerrors.Errorf("no valid deal ID found for scheduling") + } + + _, err = tx.Exec(`UPDATE pdp_pipeline SET save_cache_task_id = $1 WHERE id = $2 AND after_save_cache = FALSE AND after_add_piece_msg = TRUE AND save_cache_task_id IS NULL`, id, did) + if err != nil { + return false, xerrors.Errorf("failed to update pdp_pipeline: %w", err) + } + + stop = false // we found a task to schedule, keep going + return true, nil + }) + + } + + return nil +} + +func (t *TaskPDPSaveCache) Adder(taskFunc harmonytask.AddTaskFunc) {} + +var _ harmonytask.TaskInterface = &TaskPDPSaveCache{} +var _ = harmonytask.Reg(&TaskPDPSaveCache{}) + +// All the code below is a copy+paste of https://github.com/filecoin-project/go-fil-commp-hashhash/blob/master/commp.go +// with modification to output the nodes at a specific height + +// Calc is an implementation of a commP "hash" calculator, implementing the +// familiar hash.Hash interface. The zero-value of this object is ready to +// accept Write()s without further initialization. +type Calc struct { + state + mu sync.Mutex + snapShotLayerIdx int + snapshotNodes []NodeDigest + snapshotNodesMu sync.Mutex + expectedNodeCount int + maxLayer uint + maxlayerMU sync.Mutex +} +type state struct { + quadsEnqueued uint64 + layerQueues [MaxLayers + 2]chan []byte // one extra layer for the initial leaves, one more for the dummy never-to-use channel + resultCommP chan []byte + buffer []byte + size uint64 +} + +type NodeDigest struct { + Hash [32]byte // 32 bytes +} + +var _ hash.Hash = &Calc{} // make sure we are hash.Hash compliant + +// MaxLayers is the current maximum height of the rust-fil-proofs proving tree. +const MaxLayers = uint(35) // result of log2( 1 TiB / 32 ) + +// MaxPieceSize is the current maximum size of the rust-fil-proofs proving tree. +const MaxPieceSize = uint64(1 << (MaxLayers + 5)) + +// MaxPiecePayload is the maximum amount of data that one can Write() to the +// Calc object, before needing to derive a Digest(). Constrained by the value +// of MaxLayers. +const MaxPiecePayload = MaxPieceSize / 128 * 127 + +// MinPiecePayload is the smallest amount of data for which FR32 padding has +// a defined result. It is not possible to derive a Digest() before Write()ing +// at least this amount of bytes. +const MinPiecePayload = uint64(65) + +const ( + commpDigestSize = sha256simd.Size + quadPayload = 127 + bufferSize = 256 * quadPayload // FIXME: tune better, chosen by rough experiment +) + +var ( + layerQueueDepth = 32 // FIXME: tune better, chosen by rough experiment + shaPool = sync.Pool{New: func() interface{} { return sha256simd.New() }} + stackedNulPadding [MaxLayers][]byte +) + +// initialize the nul padding stack (cheap to do upfront, just MaxLayers loops) +func init() { + h := shaPool.Get().(hash.Hash) + + stackedNulPadding[0] = make([]byte, commpDigestSize) + for i := uint(1); i < MaxLayers; i++ { + h.Reset() + h.Write(stackedNulPadding[i-1]) // yes, got to... + h.Write(stackedNulPadding[i-1]) // ...do it twice + stackedNulPadding[i] = h.Sum(make([]byte, 0, commpDigestSize)) + stackedNulPadding[i][31] &= 0x3F + } + + shaPool.Put(h) +} + +// BlockSize is the amount of bytes consumed by the commP algorithm in one go. +// Write()ing data in multiples of BlockSize would obviate the need to maintain +// an internal carry buffer. The BlockSize of this module is 127 bytes. +func (cp *Calc) BlockSize() int { return quadPayload } + +// Size is the amount of bytes returned on Sum()/Digest(), which is 32 bytes +// for this module. +func (cp *Calc) Size() int { return commpDigestSize } + +// Reset re-initializes the accumulator object, clearing its state and +// terminating all background goroutines. It is safe to Reset() an accumulator +// in any state. +func (cp *Calc) Reset() { + cp.mu.Lock() + if cp.buffer != nil { + // we are resetting without digesting: close everything out to terminate + // the layer workers + close(cp.layerQueues[0]) + <-cp.resultCommP + } + cp.state = state{} // reset + cp.mu.Unlock() +} + +// Sum is a thin wrapper around Digest() and is provided solely to satisfy +// the hash.Hash interface. It panics on errors returned from Digest(). +// Note that unlike classic (hash.Hash).Sum(), calling this method is +// destructive: the internal state is reset and all goroutines kicked off +// by Write() are terminated. +func (cp *Calc) Sum(buf []byte) []byte { + commP, _, err := cp.digest() + if err != nil { + panic(err) + } + return append(buf, commP...) +} + +// Digest collapses the internal hash state and returns the resulting raw 32 +// bytes of commP and the padded piece size, or alternatively an error in +// case of insufficient accumulated state. On success invokes Reset(), which +// terminates all goroutines kicked off by Write(). +func (cp *Calc) digest() (commP []byte, paddedPieceSize uint64, err error) { + cp.mu.Lock() + + defer func() { + // reset only if we did succeed + if err == nil { + cp.state = state{} + } + cp.mu.Unlock() + }() + + if processed := cp.quadsEnqueued*quadPayload + uint64(len(cp.buffer)); processed < MinPiecePayload { + err = xerrors.Errorf( + "insufficient state accumulated: commP is not defined for inputs shorter than %d bytes, but only %d processed so far", + MinPiecePayload, processed, + ) + return + } + + // If any, flush remaining bytes padded up with zeroes + if len(cp.buffer) > 0 { + if mod := len(cp.buffer) % quadPayload; mod != 0 { + cp.buffer = append(cp.buffer, make([]byte, quadPayload-mod)...) + } + for len(cp.buffer) > 0 { + // FIXME: there is a smarter way to do this instead of 127-at-a-time, + // but that's for another PR + cp.digestQuads(cp.buffer[:127]) + cp.buffer = cp.buffer[127:] + } + } + + // This is how we signal to the bottom of the stack that we are done + // which in turn collapses the rest all the way to resultCommP + close(cp.layerQueues[0]) + + paddedPieceSize = cp.quadsEnqueued * 128 + // hacky round-up-to-next-pow2 + if bits.OnesCount64(paddedPieceSize) != 1 { + paddedPieceSize = 1 << uint(64-bits.LeadingZeros64(paddedPieceSize)) + } + + return <-cp.resultCommP, paddedPieceSize, nil +} + +// Write adds bytes to the accumulator, for a subsequent Digest(). Upon the +// first call of this method a few goroutines are started in the background to +// service each layer of the digest tower. If you wrote some data and then +// decide to abandon the object without invoking Digest(), you need to call +// Reset() to terminate all remaining background workers. Unlike a typical +// (hash.Hash).Write, calling this method can return an error when the total +// amount of bytes is about to go over the maximum currently supported by +// Filecoin. +func (cp *Calc) Write(input []byte) (int, error) { + if len(input) == 0 { + return 0, nil + } + + cp.mu.Lock() + defer cp.mu.Unlock() + + if MaxPiecePayload < + (cp.quadsEnqueued*quadPayload)+ + uint64(len(input)) { + return 0, xerrors.Errorf( + "writing additional %d bytes to the accumulator would overflow the maximum supported unpadded piece size %d", + len(input), MaxPiecePayload, + ) + } + + // just starting: initialize internal state, start first background layer-goroutine + if cp.buffer == nil { + cp.buffer = make([]byte, 0, bufferSize) + cp.resultCommP = make(chan []byte, 1) + cp.layerQueues[0] = make(chan []byte, layerQueueDepth) + cp.addLayer(0) + } + + // short Write() - just buffer it + if len(cp.buffer)+len(input) < bufferSize { + cp.buffer = append(cp.buffer, input...) + return len(input), nil + } + + totalInputBytes := len(input) + + if toSplice := bufferSize - len(cp.buffer); toSplice < bufferSize { + cp.buffer = append(cp.buffer, input[:toSplice]...) + input = input[toSplice:] + + cp.digestQuads(cp.buffer) + cp.buffer = cp.buffer[:0] + } + + for len(input) >= bufferSize { + cp.digestQuads(input[:bufferSize]) + input = input[bufferSize:] + } + + if len(input) > 0 { + cp.buffer = append(cp.buffer, input...) + } + + return totalInputBytes, nil +} + +// always called with power-of-2 amount of quads +func (cp *Calc) digestQuads(inSlab []byte) { + + quadsCount := len(inSlab) / 127 + cp.quadsEnqueued += uint64(quadsCount) + outSlab := make([]byte, quadsCount*128) + + for j := 0; j < quadsCount; j++ { + // Cycle over four(4) 31-byte groups, leaving 1 byte in between: + // 31 + 1 + 31 + 1 + 31 + 1 + 31 = 127 + input := inSlab[j*127 : (j+1)*127] + expander := outSlab[j*128 : (j+1)*128] + inputPlus1, expanderPlus1 := input[1:], expander[1:] + + // First 31 bytes + 6 bits are taken as-is (trimmed later) + // Note that copying them into the expansion buffer is mandatory: + // we will be feeding it to the workers which reuse the bottom half + // of the chunk for the result + copy(expander[:], input[:32]) + + // first 2-bit "shim" forced into the otherwise identical bitstream + expander[31] &= 0x3F + + // In: {{ C[7] C[6] }} X[7] X[6] X[5] X[4] X[3] X[2] X[1] X[0] Y[7] Y[6] Y[5] Y[4] Y[3] Y[2] Y[1] Y[0] Z[7] Z[6] Z[5]... + // Out: X[5] X[4] X[3] X[2] X[1] X[0] C[7] C[6] Y[5] Y[4] Y[3] Y[2] Y[1] Y[0] X[7] X[6] Z[5] Z[4] Z[3]... + for i := 31; i < 63; i++ { + expanderPlus1[i] = inputPlus1[i]<<2 | input[i]>>6 + } + + // next 2-bit shim + expander[63] &= 0x3F + + // In: {{ C[7] C[6] C[5] C[4] }} X[7] X[6] X[5] X[4] X[3] X[2] X[1] X[0] Y[7] Y[6] Y[5] Y[4] Y[3] Y[2] Y[1] Y[0] Z[7] Z[6] Z[5]... + // Out: X[3] X[2] X[1] X[0] C[7] C[6] C[5] C[4] Y[3] Y[2] Y[1] Y[0] X[7] X[6] X[5] X[4] Z[3] Z[2] Z[1]... + for i := 63; i < 95; i++ { + expanderPlus1[i] = inputPlus1[i]<<4 | input[i]>>4 + } + + // next 2-bit shim + expander[95] &= 0x3F + + // In: {{ C[7] C[6] C[5] C[4] C[3] C[2] }} X[7] X[6] X[5] X[4] X[3] X[2] X[1] X[0] Y[7] Y[6] Y[5] Y[4] Y[3] Y[2] Y[1] Y[0] Z[7] Z[6] Z[5]... + // Out: X[1] X[0] C[7] C[6] C[5] C[4] C[3] C[2] Y[1] Y[0] X[7] X[6] X[5] X[4] X[3] X[2] Z[1] Z[0] Y[7]... + for i := 95; i < 126; i++ { + expanderPlus1[i] = inputPlus1[i]<<6 | input[i]>>2 + } + + // the final 6 bit remainder is exactly the value of the last expanded byte + expander[127] = input[126] >> 2 + } + + cp.layerQueues[0] <- outSlab +} + +func (cp *Calc) addLayer(myIdx uint) { + // the next layer channel, which we might *not* use + if cp.layerQueues[myIdx+1] != nil { + panic("addLayer called more than once with identical idx argument") + } + cp.layerQueues[myIdx+1] = make(chan []byte, layerQueueDepth) + collectSnapshot := int(myIdx) == cp.snapShotLayerIdx-1 + go func() { + var twinHold []byte + + for { + slab, queueIsOpen := <-cp.layerQueues[myIdx] + + // the dream is collapsing + if !queueIsOpen { + defer func() { twinHold = nil }() + + // I am last + if myIdx == MaxLayers || cp.layerQueues[myIdx+2] == nil { + cp.resultCommP <- append(make([]byte, 0, 32), twinHold[0:32]...) + return + } + + if twinHold != nil { + copy(twinHold[32:64], stackedNulPadding[myIdx]) + cp.hashSlab254(0, collectSnapshot, twinHold[0:64]) + cp.layerQueues[myIdx+1] <- twinHold[0:64:64] + } + + // signal the next in line that they are done too + close(cp.layerQueues[myIdx+1]) + return + } + + var pushedWork bool + + switch { + case len(slab) > 1<<(5+myIdx): + cp.hashSlab254(myIdx, collectSnapshot, slab) + cp.layerQueues[myIdx+1] <- slab + pushedWork = true + case twinHold != nil: + copy(twinHold[32:64], slab[0:32]) + cp.hashSlab254(0, collectSnapshot, twinHold[0:64]) + cp.layerQueues[myIdx+1] <- twinHold[0:32:64] + pushedWork = true + twinHold = nil + default: + twinHold = slab[0:32:64] + } + + // Check whether we need another worker + // + // n.b. we will not blow out of the preallocated layerQueues array, + // as we disallow Write()s above a certain threshold + if pushedWork && cp.layerQueues[myIdx+2] == nil { + cp.addLayer(myIdx + 1) + } + } + }() +} + +func (cp *Calc) hashSlab254(layerIdx uint, collectSnapshot bool, slab []byte) { + h := shaPool.Get().(hash.Hash) + cp.maxlayerMU.Lock() + defer cp.maxlayerMU.Unlock() + if cp.maxLayer < layerIdx { + cp.maxLayer = layerIdx + } + stride := 1 << (5 + layerIdx) + for i := 0; len(slab) > i+stride; i += 2 * stride { + h.Reset() + h.Write(slab[i : i+32]) + h.Write(slab[i+stride : 32+i+stride]) + h.Sum(slab[i:i])[31] &= 0x3F // callers expect we will reuse-reduce-recycle + + if collectSnapshot { + d := make([]byte, 32) + copy(d, slab[i:i+32]) + cp.snapshotNodesMu.Lock() + cp.snapshotNodes = append(cp.snapshotNodes, NodeDigest{ + Hash: [32]byte(d), + }) + cp.snapshotNodesMu.Unlock() + } + } + + shaPool.Put(h) +} + +func NewCommPWithSize(size uint64) *Calc { + c := new(Calc) + c.size = size + + c.snapshotLayerIndex(size, false) + + return c +} + +func (cp *Calc) snapshotLayerIndex(size uint64, test bool) { + if size == 0 { + panic("size must be > 0") + } + + // Calculate padded piece size + padded := padreader.PaddedSize(size).Padded() + + // Calculate number of leaf nodes (each covers 128 bytes) + numLeaves := uint64(padded) / 32 + + // Total tree height: log2(numLeaves) + treeHeight := bits.Len64(numLeaves - 1) + + //Calculate layer L such that 127 * 2^L >= targetReadSize + //→ 2^L >= targetReadSize / 32 + //ratio := float64(1040384) / 32 + testRatio := float64(2032) / LeafSize // 2 KiB.UnPadded() + ProdRatio := float64(4161536) / LeafSize // 4 MiB.UnPadded() + var layer int + if test { + layer = int(math.Ceil(math.Log2(testRatio))) + } else { + layer = int(math.Ceil(math.Log2(ProdRatio))) + } + + // Clamp within tree bounds + cp.snapShotLayerIdx = layer + if layer < 0 { + cp.snapShotLayerIdx = 0 + } + if layer > treeHeight { + cp.snapShotLayerIdx = treeHeight + } + + expectedNodes := numLeaves >> uint(cp.snapShotLayerIdx) + cp.expectedNodeCount = int(expectedNodes) +} + +func (cp *Calc) DigestWithSnapShot() ([]byte, uint64, int, []NodeDigest, error) { + commp, paddedPieceSize, err := cp.digest() + if err != nil { + return nil, 0, 0, nil, err + } + + cp.snapshotNodesMu.Lock() + defer cp.snapshotNodesMu.Unlock() + + // Make output array of expected length + out := make([]NodeDigest, cp.expectedNodeCount) + + // Copy snapShot nodes to output + copied := copy(out[:len(cp.snapshotNodes)], cp.snapshotNodes) + + // Fill remaining nodes with zeroPadding + if copied != cp.expectedNodeCount { + count := cp.expectedNodeCount - copied + var h [32]byte + copy(h[:], stackedNulPadding[cp.snapShotLayerIdx]) + out = append(out, make([]NodeDigest, count)...) + for i := copied; i < len(out); i++ { + out[i].Hash = h + } + } + + return commp, paddedPieceSize, cp.snapShotLayerIdx, out, nil +} + +func NewCommPWithSizeForTest(size uint64) *Calc { + c := new(Calc) + c.size = size + + c.snapshotLayerIndex(size, true) + + return c +} diff --git a/tasks/piece/task_aggregate_chunks.go b/tasks/piece/task_aggregate_chunks.go new file mode 100644 index 000000000..ba8bc69b3 --- /dev/null +++ b/tasks/piece/task_aggregate_chunks.go @@ -0,0 +1,434 @@ +package piece + +import ( + "context" + "errors" + "fmt" + "io" + "net/url" + "time" + + "github.com/oklog/ulid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/lib/paths" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/market/mk20" +) + +type AggregateChunksTask struct { + db *harmonydb.DB + remote *paths.Remote + sc *ffi.SealCalls +} + +func NewAggregateChunksTask(db *harmonydb.DB, remote *paths.Remote, sc *ffi.SealCalls) *AggregateChunksTask { + return &AggregateChunksTask{ + db: db, + remote: remote, + sc: sc, + } +} + +func (a *AggregateChunksTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var chunks []struct { + ID string `db:"id"` + Chunk int `db:"chunk"` + Size int64 `db:"chunk_size"` + RefID int64 `db:"ref_id"` + } + + err = a.db.Select(ctx, &chunks, ` + SELECT + id, + chunk, + chunk_size, + ref_id + FROM + market_mk20_deal_chunk + WHERE + finalize_task_id = $1 + AND complete = TRUE + AND finalize = TRUE + ORDER BY chunk ASC`, taskID) + if err != nil { + return false, xerrors.Errorf("getting chunk details: %w", err) + } + + if len(chunks) == 0 { + return false, xerrors.Errorf("no chunks to aggregate for task %d", taskID) + } + + idStr := chunks[0].ID + + id, err := ulid.Parse(idStr) + if err != nil { + return false, xerrors.Errorf("parsing deal ID: %w", err) + } + + deal, err := mk20.DealFromDB(ctx, a.db, id) + if err != nil { + return false, xerrors.Errorf("getting deal details: %w", err) + } + + pi, err := deal.PieceInfo() + if err != nil { + return false, xerrors.Errorf("getting piece info: %w", err) + } + + rawSize := int64(pi.RawSize) + pcid := pi.PieceCIDV1 + psize := pi.Size + pcid2 := deal.Data.PieceCID + + var readers []io.Reader + var refIds []int64 + + for _, chunk := range chunks { + // get pieceID + var pieceID []struct { + PieceID storiface.PieceNumber `db:"piece_id"` + } + err = a.db.Select(ctx, &pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, chunk.RefID) + if err != nil { + return false, xerrors.Errorf("getting pieceID: %w", err) + } + + if len(pieceID) != 1 { + return false, xerrors.Errorf("expected 1 pieceID, got %d", len(pieceID)) + } + + pr, err := a.sc.PieceReader(ctx, pieceID[0].PieceID) + if err != nil { + return false, xerrors.Errorf("getting piece reader: %w", err) + } + + reader := pr + + defer func() { + _ = pr.Close() + }() + readers = append(readers, reader) + refIds = append(refIds, chunk.RefID) + } + + rd := io.MultiReader(readers...) + + var parkedPieceID, pieceRefID int64 + var pieceParked bool + + comm, err := a.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + // Check if we already have the piece, if found then verify access and skip rest of the processing + var pid int64 + err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1 AND piece_padded_size = $2 AND long_term = TRUE`, pcid.String(), psize).Scan(&pid) + if err == nil { + // If piece exists then check if we can access the data + pr, err := a.sc.PieceReader(ctx, storiface.PieceNumber(pid)) + if err != nil { + // If piece does not exist then we will park it otherwise fail here + if !errors.Is(err, storiface.ErrSectorNotFound) { + // We should fail here because any subsequent operation which requires access to data will also fail + // till this error is fixed + return false, fmt.Errorf("failed to get piece reader: %w", err) + } + } + defer func() { + _ = pr.Close() + }() + pieceParked = true + parkedPieceID = pid + } else { + if !errors.Is(err, pgx.ErrNoRows) { + return false, fmt.Errorf("failed to check if piece already exists: %w", err) + } + // If piece does not exist then let's create one + err = tx.QueryRow(` + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term, skip) + VALUES ($1, $2, $3, TRUE, TRUE) RETURNING id`, + pcid.String(), psize, rawSize).Scan(&parkedPieceID) + if err != nil { + return false, fmt.Errorf("failed to create parked_pieces entry: %w", err) + } + } + + err = tx.QueryRow(` + INSERT INTO parked_piece_refs (piece_id, data_url, long_term) + VALUES ($1, $2, TRUE) RETURNING ref_id + `, parkedPieceID, "/PUT").Scan(&pieceRefID) + if err != nil { + return false, fmt.Errorf("failed to create parked_piece_refs entry: %w", err) + } + + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("saving aggregated chunk details to DB: %w", err) + } + + if !comm { + return false, xerrors.Errorf("failed to commit the transaction") + } + + failed := true + var cleanupChunks bool + + // Clean up piece park tables in case of failure + // TODO: Figure out if there is a race condition with cleanup task + defer func() { + if cleanupChunks { + _, serr := a.db.Exec(ctx, `DELETE FROM market_mk20_deal_chunk WHERE id = $1`, id.String()) + if serr != nil { + log.Errorf("failed to delete market_mk20_deal_chunk entry: %w", serr) + } + _, serr = a.db.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = ANY($1)`, refIds) + if serr != nil { + log.Errorf("failed to delete parked_piece_refs entry: %w", serr) + } + } + if failed { + _, ferr := a.db.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = $1`, pieceRefID) + if ferr != nil { + log.Errorf("failed to delete parked_piece_refs entry: %w", ferr) + } + } + }() + + // Write piece if not already complete + if !pieceParked { + cpi, _, err := a.sc.WriteUploadPiece(ctx, storiface.PieceNumber(parkedPieceID), rawSize, rd, storiface.PathStorage, true) + if err != nil { + return false, xerrors.Errorf("writing aggregated piece data to storage: %w", err) + } + + if !cpi.PieceCID.Equals(pcid) { + cleanupChunks = true + return false, xerrors.Errorf("commP mismatch calculated %s and supplied %s", cpi.PieceCID.String(), pcid.String()) + } + + if cpi.Size != psize { + cleanupChunks = true + return false, xerrors.Errorf("commP size mismatch calculated %d and supplied %d", cpi.Size, psize) + } + } + + // Update DB status of piece, deal, PDP + comm, err = a.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var refIDUsed bool + + dealdata := deal.Data + aggregation := 0 + if dealdata.Format.Aggregate != nil { + aggregation = int(dealdata.Format.Aggregate.Type) + } + + if !pieceParked { + _, err = tx.Exec(`UPDATE parked_pieces SET + complete = TRUE + WHERE id = $1 + AND complete = false`, pieceRefID) + if err != nil { + return false, xerrors.Errorf("marking piece park as complete: %w", err) + } + } + + // Update PoRep pipeline + if deal.Products.DDOV1 != nil { + var complete bool + err = tx.QueryRow(`SELECT (ddo_v1->>'complete')::boolean FROM market_mk20_deal WHERE id = $1`, id.String()).Scan(&complete) + if err != nil { + return false, fmt.Errorf("getting porep status: %w", err) + } + if !complete { + spid, err := address.IDFromAddress(deal.Products.DDOV1.Provider) + if err != nil { + return false, fmt.Errorf("getting provider ID: %w", err) + } + + var rev mk20.RetrievalV1 + if deal.Products.RetrievalV1 != nil { + rev = *deal.Products.RetrievalV1 + } + + ddo := deal.Products.DDOV1 + dealID := deal.Identifier.String() + + var allocationID interface{} + if ddo.AllocationId != nil { + allocationID = *ddo.AllocationId + } else { + allocationID = nil + } + + pieceIDUrl := url.URL{ + Scheme: "pieceref", + Opaque: fmt.Sprintf("%d", pieceRefID), + } + + n, err := tx.Exec(`INSERT INTO market_mk20_pipeline ( + id, sp_id, contract, client, piece_cid_v2, piece_cid, + piece_size, raw_size, url, offline, indexing, announce, + allocation_id, duration, piece_aggregation, deal_aggregation, started, downloaded, after_commp) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, TRUE, TRUE, TRUE)`, + dealID, spid, ddo.ContractAddress, deal.Client, pcid2.String(), pcid.String(), + psize, rawSize, pieceIDUrl.String(), false, rev.Indexing, rev.AnnouncePayload, + allocationID, ddo.Duration, aggregation, aggregation) + + if err != nil { + return false, xerrors.Errorf("inserting mk20 pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("inserting mk20 pipeline: %d rows affected", n) + } + + refIDUsed = true + } + } + + // Update PDP pipeline + if deal.Products.PDPV1 != nil { + var complete bool + err = tx.QueryRow(`SELECT (pdp_v1->>'complete')::boolean FROM market_mk20_deal WHERE id = $1`, id.String()).Scan(&complete) + if err != nil { + return false, fmt.Errorf("getting pdp status: %w", err) + } + if !complete { + pdp := deal.Products.PDPV1 + retv := deal.Products.RetrievalV1 + if refIDUsed { + err = tx.QueryRow(` + INSERT INTO parked_piece_refs (piece_id, data_url, long_term) + VALUES ($1, $2, TRUE) RETURNING ref_id + `, parkedPieceID, "/PUT").Scan(&pieceRefID) + if err != nil { + return false, fmt.Errorf("failed to create parked_piece_refs entry: %w", err) + } + } + + n, err := tx.Exec(`INSERT INTO pdp_pipeline ( + id, client, piece_cid_v2, data_set_id, extra_data, piece_ref, + downloaded, deal_aggregation, aggr_index, indexing, announce, announce_payload, after_commp) + VALUES ($1, $2, $3, $4, $5, $6, TRUE, $7, 0, $8, $9, $10, TRUE)`, + id.String(), deal.Client, deal.Data.PieceCID.String(), *pdp.DataSetID, + pdp.ExtraData, pieceRefID, aggregation, retv.Indexing, retv.AnnouncePiece, retv.AnnouncePayload) + if err != nil { + return false, xerrors.Errorf("inserting in PDP pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("inserting in PDP pipeline: %d rows affected", n) + } + } + } + + _, err = tx.Exec(`DELETE FROM market_mk20_pipeline_waiting WHERE id = $1`, id.String()) + if err != nil { + return false, xerrors.Errorf("deleting deal from mk20 pipeline waiting: %w", err) + } + + _, err = tx.Exec(`DELETE FROM market_mk20_deal_chunk WHERE id = $1`, id.String()) + if err != nil { + return false, xerrors.Errorf("deleting deal chunks from mk20 deal: %w", err) + } + + _, err = tx.Exec(`DELETE FROM parked_piece_refs WHERE ref_id = ANY($1)`, refIds) + if err != nil { + return false, xerrors.Errorf("deleting parked piece refs: %w", err) + } + + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return false, xerrors.Errorf("updating DB: %w", err) + } + if !comm { + return false, xerrors.Errorf("failed to commit the transaction") + } + + failed = false + + return true, nil +} + +func (a *AggregateChunksTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + return &ids[0], nil +} + +func (a *AggregateChunksTask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(50), + Name: "AggregateChunks", + Cost: resources.Resources{ + Cpu: 1, + Ram: 4 << 30, + }, + MaxFailures: 1, + IAmBored: passcall.Every(5*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return a.schedule(context.Background(), taskFunc) + }), + } +} + +func (a *AggregateChunksTask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + // schedule submits + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + var mid string + var count int + err := a.db.QueryRow(ctx, `SELECT id, COUNT(*) AS total_chunks + FROM market_mk20_deal_chunk + GROUP BY id + HAVING + COUNT(*) = COUNT(*) FILTER ( + WHERE complete = TRUE + AND finalize = TRUE + AND finalize_task_id IS NULL + AND ref_id IS NOT NULL + ) + ORDER BY id + LIMIT 1;`).Scan(&mid, &count) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, nil + } + return false, xerrors.Errorf("getting next task to schedule: %w", err) + } + if mid == "" { + return false, xerrors.Errorf("no id for tasks to schedule") + } + + n, err := tx.Exec(`UPDATE market_mk20_deal_chunk SET finalize_task_id = $1 + WHERE id = $2 + AND complete = TRUE + AND finalize = TRUE + AND finalize_task_id IS NULL + AND ref_id IS NOT NULL`, id, mid) + if err != nil { + return false, xerrors.Errorf("updating chunk finalize task: %w", err) + } + if n != count { + return false, xerrors.Errorf("expected to update %d rows: %d rows affected", count, n) + } + stop = false + return true, nil + }) + } + return nil +} + +func (a *AggregateChunksTask) Adder(taskFunc harmonytask.AddTaskFunc) {} + +var _ = harmonytask.Reg(&AggregateChunksTask{}) +var _ harmonytask.TaskInterface = &AggregateChunksTask{} diff --git a/tasks/piece/task_park_piece.go b/tasks/piece/task_park_piece.go index d3b1e6ffe..4df9b7f6d 100644 --- a/tasks/piece/task_park_piece.go +++ b/tasks/piece/task_park_piece.go @@ -41,8 +41,8 @@ type ParkPieceTask struct { longTerm bool // Indicates if the task is for long-term pieces } -func NewParkPieceTask(db *harmonydb.DB, sc *ffi2.SealCalls, max int) (*ParkPieceTask, error) { - return newPieceTask(db, sc, nil, max, false) +func NewParkPieceTask(db *harmonydb.DB, sc *ffi2.SealCalls, remote *paths.Remote, max int) (*ParkPieceTask, error) { + return newPieceTask(db, sc, remote, max, false) } func NewStorePieceTask(db *harmonydb.DB, sc *ffi2.SealCalls, remote *paths.Remote, max int) (*ParkPieceTask, error) { @@ -76,6 +76,7 @@ func (p *ParkPieceTask) pollPieceTasks(ctx context.Context) { FROM parked_pieces WHERE long_term = $1 AND complete = FALSE + AND skip = FALSE AND task_id IS NULL `, p.longTerm) if err != nil { @@ -96,7 +97,7 @@ func (p *ParkPieceTask) pollPieceTasks(ctx context.Context) { p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { // Update n, err := tx.Exec( - `UPDATE parked_pieces SET task_id = $1 WHERE id = $2 AND complete = FALSE AND task_id IS NULL AND long_term = $3`, + `UPDATE parked_pieces SET task_id = $1 WHERE id = $2 AND complete = FALSE AND skip = FALSE AND task_id IS NULL AND long_term = $3`, id, pieceID.ID, p.longTerm) if err != nil { return false, xerrors.Errorf("updating parked piece: %w", err) diff --git a/tasks/seal/finalize_pieces.go b/tasks/seal/finalize_pieces.go index de94df8e5..35f4d9939 100644 --- a/tasks/seal/finalize_pieces.go +++ b/tasks/seal/finalize_pieces.go @@ -38,7 +38,7 @@ func DropSectorPieceRefs(ctx context.Context, db *harmonydb.DB, sid abi.SectorID continue } - n, err := db.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = $1`, refID) + n, err := db.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = $1 AND long_term = FALSE`, refID) if err != nil { log.Errorw("failed to delete piece ref", "url", pu.URL, "error", err, "miner", sid.Miner, "sector", sid.Number) } diff --git a/tasks/seal/poller_commit_msg.go b/tasks/seal/poller_commit_msg.go index 312162248..68ddc65ae 100644 --- a/tasks/seal/poller_commit_msg.go +++ b/tasks/seal/poller_commit_msg.go @@ -161,7 +161,14 @@ func (s *SealPoller) pollCommitMsgLanded(ctx context.Context, task pollTask) err if err != nil { return false, xerrors.Errorf("update market_mk12_deal_pipeline: %w", err) } - log.Debugw("marked deals as sealed", "sp", task.SpID, "sector", task.SectorNumber, "count", n) + log.Debugw("marked mk12 deals as sealed", "sp", task.SpID, "sector", task.SectorNumber, "count", n) + + n, err = tx.Exec(`UPDATE market_mk20_pipeline SET sealed = TRUE WHERE sp_id = $1 AND sector = $2 AND sealed = FALSE`, task.SpID, task.SectorNumber) + if err != nil { + return false, xerrors.Errorf("update market_mk20_pipeline: %w", err) + } + log.Debugw("marked mk20 deals as sealed", "sp", task.SpID, "sector", task.SectorNumber, "count", n) + return true, nil } } diff --git a/tasks/seal/task_movestorage.go b/tasks/seal/task_movestorage.go index 5f3e57e93..206fe8acd 100644 --- a/tasks/seal/task_movestorage.go +++ b/tasks/seal/task_movestorage.go @@ -67,10 +67,20 @@ func (m *MoveStorageTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) } _, err = m.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - // Create a indexing task - _, err = tx.Exec(`SELECT create_indexing_task($1, $2)`, taskID, "sectors_sdr_pipeline") + // Set indexing_created_at to Now() to allow new indexing tasks + _, err = tx.Exec(`UPDATE market_mk20_pipeline + SET indexing_created_at = NOW() + WHERE sp_id = $1 AND sector = $2;`, task.SpID, task.SectorNumber) if err != nil { - return false, fmt.Errorf("error creating indexing task: %w", err) + return false, fmt.Errorf("error creating indexing task for mk20 deals: %w", err) + } + + _, err = tx.Exec(`UPDATE market_mk12_deal_pipeline + SET indexing_created_at = NOW() + WHERE sp_id = $1 AND sector = $2; + `, task.SpID, task.SectorNumber) + if err != nil { + return false, fmt.Errorf("error creating indexing task for mk12: %w", err) } _, err = tx.Exec(`UPDATE sectors_sdr_pipeline SET after_move_storage = TRUE, task_id_move_storage = NULL WHERE task_id_move_storage = $1`, taskID) diff --git a/tasks/snap/task_movestorage.go b/tasks/snap/task_movestorage.go index b1a7d60eb..ad2b7de5a 100644 --- a/tasks/snap/task_movestorage.go +++ b/tasks/snap/task_movestorage.go @@ -71,10 +71,20 @@ func (m *MoveStorageTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) } _, err = m.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - // Create an indexing task - _, err = tx.Exec(`SELECT create_indexing_task($1, $2)`, taskID, "sectors_snap_pipeline") + // Set indexing_created_at to Now() to allow new indexing tasks + _, err = tx.Exec(`UPDATE market_mk20_pipeline + SET indexing_created_at = NOW() + WHERE sp_id = $1 AND sector = $2;`, task.SpID, task.SectorNumber) if err != nil { - return false, fmt.Errorf("error creating indexing task: %w", err) + return false, fmt.Errorf("error creating indexing task for mk20 deals: %w", err) + } + + _, err = tx.Exec(`UPDATE market_mk12_deal_pipeline + SET indexing_created_at = NOW() + WHERE sp_id = $1 AND sector = $2; + `, task.SpID, task.SectorNumber) + if err != nil { + return false, fmt.Errorf("error creating indexing task for mk12 deals: %w", err) } _, err = tx.Exec(`UPDATE sectors_snap_pipeline SET after_move_storage = TRUE, task_id_move_storage = NULL WHERE task_id_move_storage = $1`, taskID) diff --git a/tasks/snap/task_submit.go b/tasks/snap/task_submit.go index 67e7c21e8..f7caa2f63 100644 --- a/tasks/snap/task_submit.go +++ b/tasks/snap/task_submit.go @@ -804,7 +804,13 @@ func (s *SubmitTask) updateLanded(ctx context.Context, tx *harmonydb.Tx, spId, s if err != nil { return xerrors.Errorf("update market_mk12_deal_pipeline: %w", err) } - log.Debugw("marked deals as sealed", "sp", spId, "sector", sectorNum, "count", n) + log.Debugw("marked mk12 deals as sealed", "sp", spId, "sector", sectorNum, "count", n) + + n, err = tx.Exec(`UPDATE market_mk20_pipeline SET sealed = TRUE WHERE sp_id = $1 AND sector = $2 AND sealed = FALSE`, spId, sectorNum) + if err != nil { + return xerrors.Errorf("update market_mk20_pipeline: %w", err) + } + log.Debugw("marked mk20 deals as sealed", "sp", spId, "sector", sectorNum, "count", n) } } diff --git a/tasks/storage-market/market_balance.go b/tasks/storage-market/market_balance.go index c31a5d6d0..96511ac83 100644 --- a/tasks/storage-market/market_balance.go +++ b/tasks/storage-market/market_balance.go @@ -43,20 +43,31 @@ type BalanceManager struct { } func NewBalanceManager(api mbalanceApi, miners []address.Address, cfg *config.CurioConfig, sender *message.Sender) (*BalanceManager, error) { - var disabledMiners []address.Address + var mk12disabledMiners []address.Address for _, m := range cfg.Market.StorageMarketConfig.MK12.DisabledMiners { maddr, err := address.NewFromString(m) if err != nil { return nil, xerrors.Errorf("failed to parse miner string: %s", err) } - disabledMiners = append(disabledMiners, maddr) + mk12disabledMiners = append(mk12disabledMiners, maddr) } - enabled, _ := lo.Difference(miners, disabledMiners) + mk12enabled, _ := lo.Difference(miners, mk12disabledMiners) + + var mk20disabledMiners []address.Address + for _, m := range cfg.Market.StorageMarketConfig.MK20.DisabledMiners { + maddr, err := address.NewFromString(m) + if err != nil { + return nil, xerrors.Errorf("failed to parse miner string: %s", err) + } + mk20disabledMiners = append(mk20disabledMiners, maddr) + } + mk20enabled, _ := lo.Difference(miners, mk20disabledMiners) mmap := make(map[string][]address.Address) - mmap[mk12Str] = enabled + mmap[mk12Str] = mk12enabled + mmap[mk20Str] = mk20enabled bmcfg := make(map[address.Address]config.BalanceManagerConfig) for _, a := range cfg.Addresses { if len(a.MinerAddresses) > 0 { diff --git a/tasks/storage-market/mk20.go b/tasks/storage-market/mk20.go new file mode 100644 index 000000000..401f82fd7 --- /dev/null +++ b/tasks/storage-market/mk20.go @@ -0,0 +1,1254 @@ +package storage_market + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "runtime" + "strconv" + "time" + + "github.com/ipfs/go-cid" + "github.com/oklog/ulid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin" + verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" + "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/lib/commcidv2" + "github.com/filecoin-project/curio/market/mk20" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/proofs" + "github.com/filecoin-project/lotus/chain/types" + lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece" +) + +type MK20PipelinePiece struct { + ID string `db:"id"` + SPID int64 `db:"sp_id"` + Client string `db:"client"` + Contract string `db:"contract"` + PieceCIDV2 string `db:"piece_cid_v2"` + PieceCID string `db:"piece_cid"` + PieceSize int64 `db:"piece_size"` + RawSize int64 `db:"raw_size"` + Offline bool `db:"offline"` + URL *string `db:"url"` // Nullable fields use pointers + Indexing bool `db:"indexing"` + Announce bool `db:"announce"` + AllocationID *int64 `db:"allocation_id"` // Nullable fields use pointers + Duration *int64 `db:"duration"` // Nullable fields use pointers + PieceAggregation int `db:"piece_aggregation"` + + Started bool `db:"started"` + + Downloaded bool `db:"downloaded"` + + CommTaskID *int64 `db:"commp_task_id"` + AfterCommp bool `db:"after_commp"` + + DealAggregation int `db:"deal_aggregation"` + AggregationIndex int64 `db:"aggr_index"` + AggregationTaskID *int64 `db:"agg_task_id"` + Aggregated bool `db:"aggregated"` + + Sector *int64 `db:"sector"` // Nullable fields use pointers + RegSealProof *int `db:"reg_seal_proof"` // Nullable fields use pointers + SectorOffset *int64 `db:"sector_offset"` // Nullable fields use pointers + + IndexingCreatedAt *time.Time `db:"indexing_created_at"` // Nullable fields use pointers + IndexingTaskID *int64 `db:"indexing_task_id"` + Indexed bool `db:"indexed"` +} + +func (d *CurioStorageDealMarket) processMK20Deals(ctx context.Context) { + // Catch any panics if encountered as we are working with user provided data + defer func() { + if r := recover(); r != nil { + trace := make([]byte, 1<<16) + n := runtime.Stack(trace, false) + + log.Errorf("panic occurred: %v\n%s", r, trace[:n]) + } + }() + d.processMK20DealPieces(ctx) + //d.downloadMk20Deal(ctx) + d.processMK20DealAggregation(ctx) + d.processMK20DealIngestion(ctx) +} + +func (d *CurioStorageDealMarket) pipelineInsertLoop(ctx context.Context) { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + d.insertDDODealInPipeline(ctx) + d.insertDealInPipelineForUpload(ctx) + } + } +} + +func (d *CurioStorageDealMarket) insertDDODealInPipeline(ctx context.Context) { + var deals []string + rows, err := d.db.Query(ctx, `SELECT id from market_mk20_pipeline_waiting`) + if err != nil { + log.Errorf("querying mk20 pipeline waiting: %s", err) + return + } + for rows.Next() { + var dealID string + err = rows.Scan(&dealID) + if err != nil { + log.Errorf("scanning mk20 pipeline waiting: %s", err) + return + } + deals = append(deals, dealID) + } + + if err := rows.Err(); err != nil { + log.Errorf("iterating over mk20 pipeline waiting: %s", err) + return + } + var dealIDs []ulid.ULID + for _, dealID := range deals { + id, err := ulid.Parse(dealID) + if err != nil { + log.Errorf("parsing deal id: %s", err) + return + } + dealIDs = append(dealIDs, id) + } + if len(dealIDs) == 0 { + return + } + for _, id := range dealIDs { + comm, err := d.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + deal, err := mk20.DealFromTX(tx, id) + if err != nil { + return false, xerrors.Errorf("getting deal from db: %w", err) + } + err = insertPiecesInTransaction(ctx, tx, deal) + if err != nil { + return false, xerrors.Errorf("inserting pieces in db: %w", err) + } + _, err = tx.Exec(`DELETE FROM market_mk20_pipeline_waiting WHERE id = $1`, id.String()) + if err != nil { + return false, xerrors.Errorf("deleting deal from mk20 pipeline waiting: %w", err) + } + return true, nil + }) + if err != nil { + log.Errorf("inserting deal in pipeline: %s", err) + continue + } + if !comm { + log.Errorf("inserting deal in pipeline: commit failed") + continue + } + } +} + +// insertDealInPipelineForUpload start processing deals which are +// 1. Waiting for data +// 2. DataSource defined +// 3. We already have the piece +// We process both DDO and PDP deal in same function +func (d *CurioStorageDealMarket) insertDealInPipelineForUpload(ctx context.Context) { + var deals []struct { + DealID string `db:"id"` + } + err := d.db.Select(ctx, &deals, `SELECT id from market_mk20_upload_waiting WHERE chunked IS NULL AND ref_id IS NULL`) + if err != nil { + log.Errorf("querying mk20 pipeline waiting upload: %s", err) + return + } + + var dealIDs []ulid.ULID + for _, deal := range deals { + id, err := ulid.Parse(deal.DealID) + if err != nil { + log.Errorf("parsing deal id: %s", err) + return + } + dealIDs = append(dealIDs, id) + } + if len(dealIDs) == 0 { + return + } + + for _, id := range dealIDs { + _, err = d.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + deal, err := mk20.DealFromTX(tx, id) + if err != nil { + return false, xerrors.Errorf("getting deal from db: %w", err) + } + + if deal.Data == nil { + return false, nil + } + + pi, err := deal.PieceInfo() + if err != nil { + return false, xerrors.Errorf("getting piece info: %w", err) + } + + var pieceID int64 + // Check if already have the piece and save the user trouble to upload + err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1 AND piece_padded_size = $2`, pi.PieceCIDV1.String(), pi.Size).Scan(&pieceID) + + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + // We don't have the piece, let user upload + return false, nil + } else { + // Some other error occurred during select + return false, xerrors.Errorf("checking existing parked piece: %w", err) + } + } + + retv := deal.Products.RetrievalV1 + data := deal.Data + + aggregation := 0 + if data.Format.Aggregate != nil { + aggregation = int(data.Format.Aggregate.Type) + } + + spid, err := address.IDFromAddress(deal.Products.DDOV1.Provider) + if err != nil { + return false, fmt.Errorf("getting provider ID: %w", err) + } + + var comm bool + + // Insert DDO deal if present + if deal.Products.DDOV1 != nil { + ddo := deal.Products.DDOV1 + + var allocationID interface{} + if ddo.AllocationId != nil { + allocationID = *ddo.AllocationId + } else { + allocationID = nil + } + + // If we have the piece then create reference and insert in pipeline + var pieceRefID int64 + err = tx.QueryRow(` + INSERT INTO parked_piece_refs (piece_id, data_url, long_term) + VALUES ($1, $2, TRUE) RETURNING ref_id`, pieceID, "/PUT").Scan(&pieceRefID) + if err != nil { + return false, fmt.Errorf("failed to create parked_piece_refs entry: %w", err) + } + + pieceIDUrl := url.URL{ + Scheme: "pieceref", + Opaque: fmt.Sprintf("%d", pieceRefID), + } + + n, err := tx.Exec(`INSERT INTO market_mk20_pipeline ( + id, sp_id, contract, client, piece_cid_v2, piece_cid, piece_size, raw_size, url, + offline, indexing, announce, allocation_id, duration, + piece_aggregation, deal_aggregation, started, downloaded, after_commp, aggregated) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, TRUE, TRUE, TRUE, TRUE)`, + id, spid, ddo.ContractAddress, deal.Client, deal.Data.PieceCID.String(), pi.PieceCIDV1.String(), pi.Size, pi.RawSize, pieceIDUrl.String(), + false, retv.Indexing, retv.AnnouncePayload, allocationID, ddo.Duration, + 0, aggregation) + if err != nil { + return false, xerrors.Errorf("inserting piece in mk20 pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("inserting piece in mk20 pipeline: %d rows affected", n) + } + + comm = true + } + + if deal.Products.PDPV1 != nil { + pdp := deal.Products.PDPV1 + + // If we have the piece then create reference and insert in pipeline + var pieceRefID int64 + err = tx.QueryRow(` + INSERT INTO parked_piece_refs (piece_id, data_url, long_term) + VALUES ($1, $2, TRUE) RETURNING ref_id`, pieceID, "/PUT").Scan(&pieceRefID) + if err != nil { + return false, fmt.Errorf("failed to create parked_piece_refs entry: %w", err) + } + + n, err := tx.Exec(`INSERT INTO pdp_pipeline ( + id, client, piece_cid_v2, data_set_id, extra_data, piece_ref, + downloaded, deal_aggregation, aggr_index, aggregated, indexing, announce, announce_payload, after_commp) + VALUES ($1, $2, $3, $4, $5, $6, TRUE, $7, 0, TRUE, $8, $9, $10, TRUE)`, + id, deal.Client, deal.Data.PieceCID.String(), *pdp.DataSetID, + pdp.ExtraData, pieceRefID, aggregation, retv.Indexing, retv.AnnouncePiece, retv.AnnouncePayload) + if err != nil { + return false, xerrors.Errorf("inserting piece in PDP pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("inserting piece in PDP pipeline: %d rows affected", n) + } + comm = true + } + + return comm, nil + }) + if err != nil { + log.Errorf("inserting upload deal in pipeline: %s", err) + continue + } + } +} + +func insertPiecesInTransaction(ctx context.Context, tx *harmonydb.Tx, deal *mk20.Deal) error { + spid, err := address.IDFromAddress(deal.Products.DDOV1.Provider) + if err != nil { + return fmt.Errorf("getting provider ID: %w", err) + } + + var rev mk20.RetrievalV1 + if deal.Products.RetrievalV1 != nil { + rev = *deal.Products.RetrievalV1 + } + ddo := deal.Products.DDOV1 + data := deal.Data + dealID := deal.Identifier.String() + pi, err := deal.PieceInfo() + if err != nil { + return fmt.Errorf("getting piece info: %w", err) + } + + var allocationID interface{} + if ddo.AllocationId != nil { + allocationID = *ddo.AllocationId + } else { + allocationID = nil + } + + aggregation := 0 + if data.Format.Aggregate != nil { + aggregation = int(data.Format.Aggregate.Type) + } + + // Insert pipeline when Data source is HTTP + if data.SourceHTTP != nil { + var pieceID int64 + // Attempt to select the piece ID first + err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1 AND piece_padded_size = $2`, pi.PieceCIDV1.String(), pi.Size).Scan(&pieceID) + + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + // Piece does not exist, attempt to insert + err = tx.QueryRow(` + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term) + VALUES ($1, $2, $3, TRUE) + ON CONFLICT (piece_cid, piece_padded_size, long_term, cleanup_task_id) DO NOTHING + RETURNING id`, pi.PieceCIDV1.String(), pi.Size, pi.RawSize).Scan(&pieceID) + if err != nil { + return xerrors.Errorf("inserting new parked piece and getting id: %w", err) + } + } else { + // Some other error occurred during select + return xerrors.Errorf("checking existing parked piece: %w", err) + } + } + + var refIds []int64 + + // Add parked_piece_refs + for _, src := range data.SourceHTTP.URLs { + var refID int64 + + headers, err := json.Marshal(src.Headers) + if err != nil { + return xerrors.Errorf("marshaling headers: %w", err) + } + + err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url, data_headers, long_term) + VALUES ($1, $2, $3, TRUE) RETURNING ref_id`, pieceID, src.URL, headers).Scan(&refID) + if err != nil { + return xerrors.Errorf("inserting parked piece ref: %w", err) + } + refIds = append(refIds, refID) + } + + n, err := tx.Exec(`INSERT INTO market_mk20_download_pipeline (id, piece_cid_v2, product, ref_ids) VALUES ($1, $2, $3, $4)`, + dealID, deal.Data.PieceCID.String(), mk20.ProductNameDDOV1, refIds) + if err != nil { + return xerrors.Errorf("inserting mk20 download pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("inserting mk20 download pipeline: %d rows affected", n) + } + + n, err = tx.Exec(`INSERT INTO market_mk20_pipeline ( + id, sp_id, contract, client, piece_cid_v2, piece_cid, + piece_size, raw_size, offline, indexing, announce, + allocation_id, duration, piece_aggregation, started) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, TRUE)`, + dealID, spid, ddo.ContractAddress, deal.Client, data.PieceCID.String(), pi.PieceCIDV1.String(), + pi.Size, pi.RawSize, false, rev.Indexing, rev.AnnouncePayload, + allocationID, ddo.Duration, aggregation) + if err != nil { + return xerrors.Errorf("inserting mk20 pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("inserting mk20 pipeline: %d rows affected", n) + } + return nil + } + + // INSERT Pipeline when data source is offline + if deal.Data.SourceOffline != nil { + n, err := tx.Exec(`INSERT INTO market_mk20_pipeline ( + id, sp_id, contract, client, piece_cid_v2, piece_cid, + piece_size, raw_size, offline, indexing, announce, + allocation_id, duration, piece_aggregation) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`, + dealID, spid, ddo.ContractAddress, deal.Client, data.PieceCID.String(), pi.PieceCIDV1.String(), + pi.Size, pi.RawSize, true, rev.Indexing, rev.AnnouncePayload, + allocationID, ddo.Duration, aggregation) + if err != nil { + return xerrors.Errorf("inserting mk20 pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("inserting mk20 pipeline: %d rows affected", n) + } + return nil + } + + // Insert pipeline when data source is aggregate + if deal.Data.SourceAggregate != nil { + + // Find all unique pieces where data source is HTTP + type downloadkey struct { + ID string + PieceCIDV2 cid.Cid + PieceCID cid.Cid + Size abi.PaddedPieceSize + RawSize uint64 + } + toDownload := make(map[downloadkey][]mk20.HttpUrl) + + for _, piece := range deal.Data.SourceAggregate.Pieces { + spi, err := mk20.GetPieceInfo(piece.PieceCID) + if err != nil { + return xerrors.Errorf("getting piece info: %w", err) + } + if piece.SourceHTTP != nil { + urls, ok := toDownload[downloadkey{ID: dealID, PieceCIDV2: piece.PieceCID, PieceCID: spi.PieceCIDV1, Size: spi.Size, RawSize: spi.RawSize}] + if ok { + toDownload[downloadkey{ID: dealID, PieceCIDV2: piece.PieceCID, PieceCID: spi.PieceCIDV1, Size: spi.Size}] = append(urls, piece.SourceHTTP.URLs...) + } else { + toDownload[downloadkey{ID: dealID, PieceCIDV2: piece.PieceCID, PieceCID: spi.PieceCIDV1, Size: spi.Size, RawSize: spi.RawSize}] = piece.SourceHTTP.URLs + } + } + } + + batch := &pgx.Batch{} + batchSize := 5000 + + for k, v := range toDownload { + for _, src := range v { + headers, err := json.Marshal(src.Headers) + if err != nil { + return xerrors.Errorf("marshal headers: %w", err) + } + batch.Queue(`WITH inserted_piece AS ( + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term) + VALUES ($1, $2, $3, FALSE) + ON CONFLICT (piece_cid, piece_padded_size, long_term, cleanup_task_id) DO NOTHING + RETURNING id + ), + selected_piece AS ( + SELECT COALESCE( + (SELECT id FROM inserted_piece), + (SELECT id FROM parked_pieces + WHERE piece_cid = $1 AND piece_padded_size = $2 AND long_term = FALSE AND cleanup_task_id IS NULL) + ) AS id + ), + inserted_ref AS ( + INSERT INTO parked_piece_refs (piece_id, data_url, data_headers, long_term) + SELECT id, $4, $5, FALSE FROM selected_piece + RETURNING ref_id + ) + INSERT INTO market_mk20_download_pipeline (id, piece_cid_v2, product, ref_ids) + VALUES ($6, $8, $7, ARRAY[(SELECT ref_id FROM inserted_ref)]) + ON CONFLICT (id, piece_cid_v2, product) DO UPDATE + SET ref_ids = array_append( + market_mk20_download_pipeline.ref_ids, + (SELECT ref_id FROM inserted_ref) + ) + WHERE NOT market_mk20_download_pipeline.ref_ids @> ARRAY[(SELECT ref_id FROM inserted_ref)];`, + k.PieceCID.String(), k.Size, k.RawSize, src.URL, headers, k.ID, mk20.ProductNameDDOV1, k.PieceCIDV2.String()) + } + + if batch.Len() > batchSize { + res := tx.SendBatch(ctx, batch) + if err := res.Close(); err != nil { + return xerrors.Errorf("closing parked piece query batch: %w", err) + } + batch = &pgx.Batch{} + } + } + + if batch.Len() > 0 { + res := tx.SendBatch(ctx, batch) + if err := res.Close(); err != nil { + return xerrors.Errorf("closing parked piece query batch: %w", err) + } + } + + pBatch := &pgx.Batch{} + pBatchSize := 4000 + for i, piece := range deal.Data.SourceAggregate.Pieces { + var offline bool + if piece.SourceOffline != nil { + offline = true + } + spi, err := mk20.GetPieceInfo(piece.PieceCID) + if err != nil { + return xerrors.Errorf("getting piece info: %w", err) + } + pBatch.Queue(`INSERT INTO market_mk20_pipeline (id, sp_id, contract, client, piece_cid_v2, piece_cid, + piece_size, raw_size, offline, indexing, announce, allocation_id, duration, + piece_aggregation, deal_aggregation, aggr_index, started) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)`, + dealID, spid, ddo.ContractAddress, deal.Client, piece.PieceCID.String(), spi.PieceCIDV1.String(), + spi.Size, spi.RawSize, offline, rev.Indexing, rev.AnnouncePayload, allocationID, ddo.Duration, + 0, aggregation, i, !offline) + if pBatch.Len() > pBatchSize { + res := tx.SendBatch(ctx, pBatch) + if err := res.Close(); err != nil { + return xerrors.Errorf("closing mk20 pipeline insert batch: %w", err) + } + pBatch = &pgx.Batch{} + } + } + if pBatch.Len() > 0 { + res := tx.SendBatch(ctx, pBatch) + if err := res.Close(); err != nil { + return xerrors.Errorf("closing mk20 pipeline insert batch: %w", err) + } + } + return nil + } + + return xerrors.Errorf("unknown data source type") +} + +func (d *CurioStorageDealMarket) processMK20DealPieces(ctx context.Context) { + var pieces []MK20PipelinePiece + err := d.db.Select(ctx, &pieces, `SELECT + id, + sp_id, + contract, + client, + piece_cid_v2, + piece_cid, + piece_size, + raw_size, + offline, + url, + indexing, + announce, + allocation_id, + duration, + piece_aggregation, + started, + downloaded, + commp_task_id, + after_commp, + deal_aggregation, + aggr_index, + agg_task_id, + aggregated, + sector, + reg_seal_proof, + sector_offset, + indexing_created_at, + indexing_task_id, + indexed + FROM + market_mk20_pipeline + WHERE complete = false ORDER BY created_at ASC; + `) + if err != nil { + log.Errorw("failed to get deals from DB", "error", err) + return + } + + for _, piece := range pieces { + err := d.processMk20Pieces(ctx, piece) + if err != nil { + log.Errorw("failed to process deal", "ID", piece.ID, "SP", piece.SPID, "Contract", piece.Contract, "Piece CID", piece.PieceCID, "Piece Size", piece.PieceSize, "error", err) + continue + } + } + +} + +func (d *CurioStorageDealMarket) processMk20Pieces(ctx context.Context, piece MK20PipelinePiece) error { + err := d.downloadMk20Deal(ctx, piece) + if err != nil { + return err + } + + err = d.findOfflineURLMk20Deal(ctx, piece) + if err != nil { + return err + } + + err = d.createCommPMk20Piece(ctx, piece) + if err != nil { + return err + } + + err = d.addDealOffset(ctx, piece) + if err != nil { + return err + } + + return nil +} + +// downloadMk20Deal handles the downloading process of an MK20 pipeline piece by scheduling it in the database and updating its status. +// If the pieces are part of an aggregation deal then we download for short term otherwise, +// we download for long term to avoid the need to have unsealed copy +func (d *CurioStorageDealMarket) downloadMk20Deal(ctx context.Context, piece MK20PipelinePiece) error { + //n, err := d.db.Exec(ctx, `SELECT mk20_ddo_mark_downloaded($1)`, mk20.ProductNameDDOV1) + //if err != nil { + // log.Errorf("failed to mark PDP downloaded piece: %v", err) + // + //} + //log.Debugf("Succesfully marked %d PDP pieces as downloaded", n) + + if !piece.Downloaded && piece.Started { + _, err := d.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var refid int64 + err = tx.QueryRow(`SELECT u.ref_id FROM ( + SELECT unnest(dp.ref_ids) AS ref_id + FROM market_mk20_download_pipeline dp + WHERE dp.id = $1 AND dp.piece_cid = $2 AND dp.piece_size = $3 AND dp.product = $4 + ) u + JOIN parked_piece_refs pr ON pr.ref_id = u.ref_id + JOIN parked_pieces pp ON pp.id = pr.piece_id + WHERE pp.complete = TRUE + LIMIT 1;`, piece.ID, piece.PieceCID, piece.PieceSize, mk20.ProductNameDDOV1).Scan(&refid) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, nil + } + return false, xerrors.Errorf("failed to check if the piece is downloaded: %w", err) + } + + // Remove other ref_ids from piece_park_refs + _, err = tx.Exec(`DELETE FROM parked_piece_refs + WHERE ref_id IN ( + SELECT unnest(dp.ref_ids) + FROM market_mk20_download_pipeline dp + WHERE dp.id = $1 AND dp.piece_cid = $2 AND dp.piece_size = $3 AND dp.product = $4 + ) + AND ref_id != $5;`, piece.ID, piece.PieceCID, piece.PieceSize, mk20.ProductNameDDOV1, refid) + if err != nil { + return false, xerrors.Errorf("failed to remove other ref_ids from piece_park_refs: %w", err) + } + + _, err = tx.Exec(`DELETE FROM market_mk20_download_pipeline WHERE id = $1 AND piece_cid = $2 AND piece_size = $3 AND product = $4;`, + piece.ID, piece.PieceCID, piece.PieceSize, mk20.ProductNameDDOV1) + if err != nil { + return false, xerrors.Errorf("failed to delete piece from download table: %w", err) + } + + pieceIDUrl := url.URL{ + Scheme: "pieceref", + Opaque: fmt.Sprintf("%d", refid), + } + + _, err = tx.Exec(`UPDATE market_mk20_pipeline SET downloaded = TRUE, url = $1 + WHERE id = $2 + AND piece_cid = $3 + AND piece_size = $4`, + pieceIDUrl.String(), piece.ID, piece.PieceCID, piece.PieceSize) + if err != nil { + return false, xerrors.Errorf("failed to update pipeline piece table: %w", err) + } + piece.Downloaded = true + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return xerrors.Errorf("failed to schedule the deal for download: %w", err) + } + } + return nil +} + +// findOfflineURLMk20Deal find the URL for offline piece. In MK20, we don't work directly with remote pieces, we download them +// locally and then decide to aggregate, long term or remove them +func (d *CurioStorageDealMarket) findOfflineURLMk20Deal(ctx context.Context, piece MK20PipelinePiece) error { + if piece.Offline && !piece.Downloaded && !piece.Started { + comm, err := d.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var updated bool + err = tx.QueryRow(`SELECT process_offline_download($1, $2, $3, $4, $5)`, piece.ID, piece.PieceCIDV2, piece.PieceCID, piece.PieceSize, mk20.ProductNameDDOV1).Scan(&updated) + if err != nil { + if !errors.Is(err, pgx.ErrNoRows) { + return false, xerrors.Errorf("failed to start download for offline deal %s: %w", piece.ID, err) + } + } + + if updated { + return true, nil + } + + // Check if We can find the URL for this piece on remote servers + for rUrl, headers := range d.urls { + // Create a new HTTP request + urlString := fmt.Sprintf("%s?id=%s", rUrl, piece.PieceCIDV2) + req, err := http.NewRequest(http.MethodHead, urlString, nil) + if err != nil { + return false, xerrors.Errorf("error creating request: %w", err) + } + + req.Header = headers + + // Create a client and make the request + client := &http.Client{ + Timeout: 10 * time.Second, + } + resp, err := client.Do(req) + if err != nil { + return false, xerrors.Errorf("error making GET request: %w", err) + } + + // Check the response code for 404 + if resp.StatusCode != http.StatusOK { + if resp.StatusCode != 404 { + return false, xerrors.Errorf("not ok response from HTTP server: %s", resp.Status) + } + continue + } + + hdrs, err := json.Marshal(headers) + if err != nil { + return false, xerrors.Errorf("marshaling headers: %w", err) + } + + rawSizeStr := resp.Header.Get("Content-Length") + if rawSizeStr == "" { + continue + } + rawSize, err := strconv.ParseInt(rawSizeStr, 10, 64) + if err != nil { + return false, xerrors.Errorf("failed to parse the raw size: %w", err) + } + + if rawSize != piece.RawSize { + continue + } + + if abi.PaddedPieceSize(piece.PieceSize) != padreader.PaddedSize(uint64(rawSize)).Padded() { + continue + } + + _, err = tx.Exec(`WITH pipeline_piece AS ( + SELECT id, piece_cid, piece_size, deal_aggregation + FROM market_mk20_pipeline + WHERE id = $1 AND piece_cid = $2 AND piece_size = $3 + ), + existing_piece AS ( + SELECT id AS piece_id + FROM parked_pieces + WHERE piece_cid = $2 AND piece_padded_size = $3 + ), + inserted_piece AS ( + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term) + SELECT $2, $3, $4, NOT (p.deal_aggregation > 0) + FROM pipeline_piece p + WHERE NOT EXISTS (SELECT 1 FROM existing_piece) + RETURNING id AS piece_id + ), + selected_piece AS ( + SELECT piece_id FROM existing_piece + UNION ALL + SELECT piece_id FROM inserted_piece + ), + inserted_ref AS ( + INSERT INTO parked_piece_refs (piece_id, data_url, data_headers, long_term) + SELECT + s.piece_id, + $5, + $6, + NOT (p.deal_aggregation > 0) + FROM selected_piece s + JOIN pipeline_piece p ON true + RETURNING ref_id + ), + upsert_pipeline AS ( + INSERT INTO market_mk20_download_pipeline (id, piece_cid_v2, product, ref_ids) + SELECT $1, $8, $7, array_agg(ref_id) + FROM inserted_ref + ON CONFLICT (id, piece_cid_v2, product) DO UPDATE + SET ref_ids = ( + SELECT array( + SELECT DISTINCT r + FROM unnest(market_mk20_download_pipeline.ref_ids || excluded.ref_ids) AS r + ) + ) + ) + UPDATE market_mk20_pipeline + SET started = TRUE + WHERE id = $1 AND piece_cid = $2 AND piece_size = $3 AND started = FALSE;`, + piece.ID, piece.PieceCID, piece.PieceSize, rawSize, urlString, hdrs, mk20.ProductNameDDOV1, piece.PieceCIDV2) + if err != nil { + return false, xerrors.Errorf("failed to start download for offline deal using PieceLocator: %w", err) + } + + return true, nil + } + return false, nil + + }, harmonydb.OptionRetry()) + if err != nil { + return xerrors.Errorf("deal %s: %w", piece.ID, err) + } + + if comm { + log.Infow("URL attached for offline deal piece", "deal piece", piece) + } + } + + return nil +} + +// createCommPMk20Piece handles the creation of a CommP task for an MK20 pipeline piece, updating its status based on piece attributes. +func (d *CurioStorageDealMarket) createCommPMk20Piece(ctx context.Context, piece MK20PipelinePiece) error { + if piece.Downloaded && !piece.AfterCommp && piece.CommTaskID == nil { + // Skip commP is configured to do so + if d.cfg.Market.StorageMarketConfig.MK20.SkipCommP { + _, err := d.db.Exec(ctx, `UPDATE market_mk20_pipeline SET after_commp = TRUE, commp_task_id = NULL + WHERE id = $1 + AND sp_id = $2 + AND piece_cid = $3 + AND piece_size = $4 + AND raw_size = $5 + AND aggr_index = $6 + AND downloaded = TRUE + AND after_commp = FALSE`, piece.ID, piece.SPID, piece.PieceCID, piece.PieceSize, piece.RawSize, piece.AggregationIndex) + if err != nil { + return xerrors.Errorf("marking piece as after commP: %w", err) + } + log.Debugw("commP skipped successfully", "deal piece", piece) + return nil + } + + if d.adders[pollerCommP].IsSet() { + d.adders[pollerCommP].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { + // update + n, err := tx.Exec(`UPDATE market_mk20_pipeline SET commp_task_id = $1 + WHERE id = $2 + AND sp_id = $3 + AND piece_cid = $4 + AND piece_size = $5 + AND raw_size = $6 + AND aggr_index = $7 + AND downloaded = TRUE + AND after_commp = FALSE + AND commp_task_id IS NULL`, id, piece.ID, piece.SPID, piece.PieceCID, piece.PieceSize, piece.RawSize, piece.AggregationIndex) + if err != nil { + return false, xerrors.Errorf("creating commP task for deal piece: %w", err) + } + + if n > 0 { + log.Debugw("commP task created successfully", "deal piece", piece) + } + + // commit only if we updated the piece + return n > 0, nil + }) + } + + return nil + } + return nil +} + +func (d *CurioStorageDealMarket) addDealOffset(ctx context.Context, piece MK20PipelinePiece) error { + // Get the deal offset if sector has started sealing + if piece.Sector != nil && piece.RegSealProof != nil && piece.SectorOffset == nil { + _, err := d.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + type pieces struct { + Cid string `db:"piece_cid"` + Size abi.PaddedPieceSize `db:"piece_size"` + Index int64 `db:"piece_index"` + } + + var pieceList []pieces + err = tx.Select(&pieceList, `SELECT piece_cid, piece_size, piece_index + FROM sectors_sdr_initial_pieces + WHERE sp_id = $1 AND sector_number = $2 + + UNION ALL + + SELECT piece_cid, piece_size, piece_index + FROM sectors_snap_initial_pieces + WHERE sp_id = $1 AND sector_number = $2 + + ORDER BY piece_index ASC;`, piece.SPID, piece.Sector) + if err != nil { + return false, xerrors.Errorf("getting pieces for sector: %w", err) + } + + if len(pieceList) == 0 { + // Sector might be waiting for more deals + return false, nil + } + + var offset abi.UnpaddedPieceSize + + for _, p := range pieceList { + _, padLength := proofs.GetRequiredPadding(offset.Padded(), p.Size) + offset += padLength.Unpadded() + if p.Cid == piece.PieceCID && p.Size == abi.PaddedPieceSize(piece.PieceSize) { + n, err := tx.Exec(`UPDATE market_mk20_pipeline SET sector_offset = $1 WHERE id = $2 AND sector = $3 AND sector_offset IS NULL`, offset.Padded(), piece.ID, piece.Sector) + if err != nil { + return false, xerrors.Errorf("updating deal offset: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected to update 1 deal, updated %d", n) + } + offset += p.Size.Unpadded() + return true, nil + } + + } + return false, xerrors.Errorf("failed to find deal offset for piece %s", piece.PieceCID) + }, harmonydb.OptionRetry()) + if err != nil { + return xerrors.Errorf("failed to get deal offset: %w", err) + } + } + return nil +} + +func (d *CurioStorageDealMarket) processMK20DealAggregation(ctx context.Context) { + if !d.adders[pollerAggregate].IsSet() { + return + } + + var deals []struct { + ID string `db:"id"` + Count int `db:"count"` + } + + err := d.db.Select(ctx, &deals, `SELECT id, COUNT(*) AS count + FROM market_mk20_pipeline + GROUP BY id + HAVING bool_and(after_commp) + AND bool_and(NOT aggregated) + AND bool_and(agg_task_id IS NULL);`) + if err != nil { + log.Errorf("getting deals to aggregate: %w", err) + return + } + + for _, deal := range deals { + log.Infow("processing aggregation task", "deal", deal.ID, "count", deal.Count) + d.adders[pollerAggregate].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { + n, err := tx.Exec(`UPDATE market_mk20_pipeline SET agg_task_id = $1 + WHERE id = $2 + AND started = TRUE + AND downloaded = TRUE + AND after_commp = TRUE + AND aggregated = FALSE + AND agg_task_id IS NULL`, id, deal.ID) + if err != nil { + return false, xerrors.Errorf("creating aggregation task for deal: %w", err) + } + + if n == deal.Count { + log.Infow("aggregation task created successfully", "deal", deal.ID) + } + + return n == deal.Count, nil + }) + } + +} + +func (d *CurioStorageDealMarket) processMK20DealIngestion(ctx context.Context) { + + head, err := d.api.ChainHead(ctx) + if err != nil { + log.Errorf("getting chain head: %w", err) + return + } + + var deals []struct { + ID string `db:"id"` + SPID int64 `db:"sp_id"` + Client string `db:"client"` + PieceCID string `db:"piece_cid"` + PieceSize int64 `db:"piece_size"` + RawSize int64 `db:"raw_size"` + AllocationID *int64 `db:"allocation_id"` + Duration int64 `db:"duration"` + Url string `db:"url"` + Count int `db:"unassigned_count"` + } + + err = d.db.Select(ctx, &deals, `SELECT + id, + MIN(sp_id) AS sp_id, + MIN(client) AS client, + MIN(piece_cid) AS piece_cid, + MIN(piece_size) AS piece_size, + MIN(raw_size) AS raw_size, + MIN(allocation_id) AS allocation_id, + MIN(duration) AS duration, + MIN(url) AS url, + COUNT(*) AS unassigned_count + FROM market_mk20_pipeline + WHERE aggregated = TRUE AND sector IS NULL + GROUP BY id;`) + if err != nil { + log.Errorf("getting deals for ingestion: %w", err) + return + } + + for _, deal := range deals { + if deal.Count != 1 { + log.Errorf("unexpected count for deal: %s", deal.ID) + continue + } + + pcid, err := cid.Parse(deal.PieceCID) + if err != nil { + log.Errorw("failed to parse aggregate piece cid", "deal", deal, "error", err) + continue + } + + client, err := address.NewFromString(deal.Client) + if err != nil { + log.Errorw("failed to parse client address", "deal", deal, "error", err) + continue + } + + clientIdAddr, err := d.api.StateLookupID(ctx, client, types.EmptyTSK) + if err != nil { + log.Errorw("failed to lookup client id", "deal", deal, "error", err) + } + + clientId, err := address.IDFromAddress(clientIdAddr) + if err != nil { + log.Errorw("failed to parse client id", "deal", deal, "error", err) + continue + } + + aurl, err := url.Parse(deal.Url) + if err != nil { + log.Errorf("failed to parse aggregate url: %w", err) + continue + } + if aurl.Scheme != "pieceref" { + log.Errorw("aggregate url is not a pieceref: %s", deal) + continue + } + + start := head.Height() + 2*builtin.EpochsInDay + end := start + abi.ChainEpoch(deal.Duration) + var vak *miner.VerifiedAllocationKey + if deal.AllocationID != nil { + alloc, err := d.api.StateGetAllocation(ctx, client, verifreg.AllocationId(*deal.AllocationID), types.EmptyTSK) + if err != nil { + log.Errorw("failed to get allocation", "deal", deal, "error", err) + continue + } + if alloc == nil { + log.Errorw("allocation not found", "deal", deal, "error", err) + continue + } + if alloc.Expiration < start { + log.Errorw("allocation expired", "deal", deal, "error", err) + continue + } + end = start + alloc.TermMin + vak = &miner.VerifiedAllocationKey{ + Client: abi.ActorID(clientId), + ID: verifreg13.AllocationId(*deal.AllocationID), + } + } + + // TODO: Attach notifications + pdi := lpiece.PieceDealInfo{ + DealSchedule: lpiece.DealSchedule{ + StartEpoch: start, + EndEpoch: end, + }, + PieceActivationManifest: &miner.PieceActivationManifest{ + CID: pcid, + Size: abi.PaddedPieceSize(deal.PieceSize), + VerifiedAllocationKey: vak, + }, + } + + maddr, err := address.NewIDAddress(uint64(deal.SPID)) + if err != nil { + log.Errorw("failed to parse miner address", "deal", deal, "error", err) + continue + } + + comm, err := d.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + sector, sp, err := d.pin.AllocatePieceToSector(ctx, tx, maddr, pdi, deal.RawSize, *aurl, nil) + if err != nil { + return false, xerrors.Errorf("failed to allocate piece to sector: %w", err) + } + + n, err := tx.Exec(`UPDATE market_mk20_pipeline SET sector = $1, reg_seal_proof = $2 WHERE id = $3`, *sector, *sp, deal.ID) + if err != nil { + return false, xerrors.Errorf("failed to update deal: %w", err) + } + + return n == 1, nil + }, harmonydb.OptionRetry()) + if err != nil { + log.Errorf("failed to commit transaction: %s", err) + continue + } + if comm { + log.Infow("deal ingested successfully", "deal", deal) + } else { + log.Infow("deal not ingested", "deal", deal) + } + } +} + +func (d *CurioStorageDealMarket) migratePieceCIDV2(ctx context.Context) { + ticker := time.NewTicker(1 * time.Hour) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + d.migratePcid(ctx) + } + } +} + +func (d *CurioStorageDealMarket) migratePcid(ctx context.Context) { + // Migrate ipni_chunks table + var pieceCIDs []struct { + PieceCID string `db:"piece_cid"` + } + err := d.db.Select(ctx, &pieceCIDs, `SELECT piece_cid FROM ipni_chunks`) + if err != nil { + log.Errorf("failed to get piece CIDs: %w", err) + return + } + + for _, pieceCID := range pieceCIDs { + pcid, err := cid.Parse(pieceCID.PieceCID) + if err != nil { + log.Errorf("failed to parse piece CID: %w", err) + continue + } + isPcid2 := commcidv2.IsPieceCidV2(pcid) + if isPcid2 { + continue + } + + comm, err := d.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + // Check that table market_piece_metadata has a single entry for this piece cid + var count int + err = tx.QueryRow(`SELECT COUNT(*) AS count FROM market_piece_metadata WHERE piece_cid = $1`, pieceCID.PieceCID).Scan(&count) + if err != nil { + return false, xerrors.Errorf("failed to get piece metadata: %w", err) + } + if count != 1 { + return false, xerrors.Errorf("expected to find a single piece metadata entry for piece cid %s", pieceCID.PieceCID) + } + // Get raw size from market_piece_deal table for this piece CID + var rawSize uint64 + err = tx.QueryRow(`SELECT raw_size FROM market_piece_deal WHERE piece_cid = $1`, pieceCID.PieceCID).Scan(&rawSize) + if err != nil { + log.Errorf("failed to get piece deal: %w", err) + } + + pcid2, err := commcidv2.PieceCidV2FromV1(pcid, rawSize) + if err != nil { + return false, xerrors.Errorf("failed to convert to piece cid v2: %w", err) + } + + // Update ipni_chunks table with correct entry + _, err = tx.Exec(`UPDATE ipni_chunks SET piece_cid = $1 WHERE piece_cid = $2`, pcid2.String(), pieceCID.PieceCID) + if err != nil { + return false, xerrors.Errorf("failed to update ipni_chunks table: %w", err) + } + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + log.Errorf("failed to commit transaction: %s", err) + continue + } + if comm { + log.Debugw("piece CID migrated successfully", "piece CID", pieceCID.PieceCID) + } else { + log.Debugw("piece CID not migrated", "piece CID", pieceCID.PieceCID) + } + } + + // Add PieceCIDv2 to ipni table + var pieceInfos []struct { + PieceCID string `db:"piece_cid"` + Size int64 `db:"size"` + RawSize int64 `db:"raw_size"` + } + err = d.db.Select(ctx, &pieceInfos, `SELECT + i.piece_cid, + i.piece_size, + mpd.raw_size + FROM ipni AS i + JOIN LATERAL ( + SELECT d.raw_size + FROM market_piece_deal AS d + WHERE d.piece_cid = i.piece_cid + AND d.piece_length = i.piece_size + LIMIT 1 + ) AS mpd ON true + WHERE i.piece_cid_v2 IS NULL;`) + if err != nil { + log.Errorf("failed to get piece infos: %w", err) + return + } + for _, pieceInfo := range pieceInfos { + pcid, err := cid.Parse(pieceInfo.PieceCID) + if err != nil { + log.Errorf("failed to parse piece CID: %w", err) + } + + pcid2, err := commcidv2.PieceCidV2FromV1(pcid, uint64(pieceInfo.RawSize)) + if err != nil { + log.Errorf("failed to convert to piece cid v2: %w", err) + } + + _, err = d.db.Exec(ctx, `UPDATE ipni SET piece_cid_v2 = $1 WHERE piece_cid = $2 AND piece_size = $3`, pcid2.String(), pieceInfo.PieceCID, pieceInfo.Size) + if err != nil { + log.Errorf("failed to update ipni table: %w", err) + } + } +} diff --git a/tasks/storage-market/storage_market.go b/tasks/storage-market/storage_market.go index 927e07b07..831d6c194 100644 --- a/tasks/storage-market/storage_market.go +++ b/tasks/storage-market/storage_market.go @@ -14,6 +14,7 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/yugabyte/pgx/v5" @@ -28,14 +29,16 @@ import ( "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/multictladdr" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/promise" "github.com/filecoin-project/curio/market/mk12" "github.com/filecoin-project/curio/market/mk12/legacytypes" + "github.com/filecoin-project/curio/market/mk20" "github.com/filecoin-project/curio/market/storageingest" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/proofs" "github.com/filecoin-project/lotus/storage/pipeline/piece" ) @@ -51,11 +54,12 @@ const ( pollerCommP = iota pollerPSD pollerFindDeal + pollerAggregate numPollers ) -const dealPollerInterval = 30 * time.Second +const dealPollerInterval = 3 * time.Second type storageMarketAPI interface { mk12.MK12API @@ -66,13 +70,16 @@ type CurioStorageDealMarket struct { cfg *config.CurioConfig db *harmonydb.DB pin storageingest.Ingester - miners map[string][]address.Address + miners []address.Address api storageMarketAPI MK12Handler *mk12.MK12 + MK20Handler *mk20.MK20 + ethClient *ethclient.Client si paths.SectorIndex urls map[string]http.Header adders [numPollers]promise.Promise[harmonytask.AddTaskFunc] as *multictladdr.MultiAddressSelector + sc *ffi.SealCalls } type MK12Pipeline struct { @@ -109,10 +116,7 @@ type MK12Pipeline struct { Offset *int64 `db:"sector_offset"` } -func NewCurioStorageDealMarket(miners []address.Address, db *harmonydb.DB, cfg *config.CurioConfig, si paths.SectorIndex, mapi storageMarketAPI, as *multictladdr.MultiAddressSelector) *CurioStorageDealMarket { - - moduleMap := make(map[string][]address.Address) - moduleMap[mk12Str] = append(moduleMap[mk12Str], miners...) +func NewCurioStorageDealMarket(miners []address.Address, db *harmonydb.DB, cfg *config.CurioConfig, ethClient *ethclient.Client, si paths.SectorIndex, mapi storageMarketAPI, as *multictladdr.MultiAddressSelector, sc *ffi.SealCalls) *CurioStorageDealMarket { urls := make(map[string]http.Header) for _, curl := range cfg.Market.StorageMarketConfig.PieceLocator { @@ -120,64 +124,65 @@ func NewCurioStorageDealMarket(miners []address.Address, db *harmonydb.DB, cfg * } return &CurioStorageDealMarket{ - cfg: cfg, - db: db, - api: mapi, - miners: moduleMap, - si: si, - urls: urls, - as: as, + cfg: cfg, + db: db, + api: mapi, + miners: miners, + si: si, + urls: urls, + as: as, + ethClient: ethClient, + sc: sc, } } func (d *CurioStorageDealMarket) StartMarket(ctx context.Context) error { var err error - for module, miners := range d.miners { - if module == mk12Str { - if len(miners) == 0 { - // Do not start the poller if no minerID present - return nil - } - d.MK12Handler, err = mk12.NewMK12Handler(miners, d.db, d.si, d.api, d.cfg, d.as) - if err != nil { - return err - } + d.MK12Handler, err = mk12.NewMK12Handler(d.miners, d.db, d.si, d.api, d.cfg, d.as) + if err != nil { + return err + } - if d.MK12Handler != nil { - for _, miner := range miners { - _, err = d.MK12Handler.GetAsk(ctx, miner) - if err != nil { - if strings.Contains(err.Error(), "no ask found") { - if build.BuildType != build.BuildMainnet && build.BuildType != build.BuildCalibnet { - err = d.MK12Handler.SetAsk(ctx, abi.NewTokenAmount(0), abi.NewTokenAmount(0), miner, legacytypes.MinPieceSize(abi.PaddedPieceSize(128)), legacytypes.MaxPieceSize(abi.PaddedPieceSize(8<<20))) - if err != nil { - return xerrors.Errorf("failed to set ask for miner %s: %w", miner, err) - } - } else { - err = d.MK12Handler.SetAsk(ctx, abi.NewTokenAmount(45211226852), abi.NewTokenAmount(0), miner) - if err != nil { - return xerrors.Errorf("failed to set ask for miner %s: %w", miner, err) - } - } - } else { - return xerrors.Errorf("failed to get miner ask %s: %w", miner, err) + if d.MK12Handler != nil { + for _, miner := range d.miners { + _, err = d.MK12Handler.GetAsk(ctx, miner) + if err != nil { + if strings.Contains(err.Error(), "no ask found") { + if build.BuildType != build.BuildMainnet && build.BuildType != build.BuildCalibnet { + err = d.MK12Handler.SetAsk(ctx, abi.NewTokenAmount(0), abi.NewTokenAmount(0), miner, legacytypes.MinPieceSize(abi.PaddedPieceSize(128)), legacytypes.MaxPieceSize(abi.PaddedPieceSize(8<<20))) + if err != nil { + return xerrors.Errorf("failed to set ask for miner %s: %w", miner, err) + } + } else { + err = d.MK12Handler.SetAsk(ctx, abi.NewTokenAmount(45211226852), abi.NewTokenAmount(0), miner) + if err != nil { + return xerrors.Errorf("failed to set ask for miner %s: %w", miner, err) } } + } else { + return xerrors.Errorf("failed to get miner ask %s: %w", miner, err) } } - - if d.cfg.Ingest.DoSnap { - d.pin, err = storageingest.NewPieceIngesterSnap(ctx, d.db, d.api, miners, d.cfg) - } else { - d.pin, err = storageingest.NewPieceIngester(ctx, d.db, d.api, miners, d.cfg) - } } } + d.MK20Handler, err = mk20.NewMK20Handler(d.miners, d.db, d.si, d.api, d.ethClient, d.cfg, d.as, d.sc) if err != nil { return err } + + if len(d.miners) > 0 { + if d.cfg.Ingest.DoSnap { + d.pin, err = storageingest.NewPieceIngesterSnap(ctx, d.db, d.api, d.miners, d.cfg) + } else { + d.pin, err = storageingest.NewPieceIngester(ctx, d.db, d.api, d.miners, d.cfg) + } + if err != nil { + return err + } + } + go d.runPoller(ctx) return nil @@ -185,6 +190,10 @@ func (d *CurioStorageDealMarket) StartMarket(ctx context.Context) error { } func (d *CurioStorageDealMarket) runPoller(ctx context.Context) { + // Start thread to insert mk20 DDO deals into pipeline + go d.pipelineInsertLoop(ctx) + go d.migratePieceCIDV2(ctx) + ticker := time.NewTicker(dealPollerInterval) defer ticker.Stop() @@ -218,13 +227,8 @@ func (d *CurioStorageDealMarket) poll(ctx context.Context) { 5. Once commP is complete, send PSD and find the allocated deal ID 6. Add the deal using pieceIngest */ - for module, miners := range d.miners { - if module == mk12Str { - if len(miners) > 0 { - d.processMK12Deals(ctx) - } - } - } + d.processMK12Deals(ctx) + d.processMK20Deals(ctx) } func (d *CurioStorageDealMarket) processMK12Deals(ctx context.Context) { @@ -497,21 +501,36 @@ func (d *CurioStorageDealMarket) findURLForOfflineDeals(ctx context.Context, dea var updated bool err = tx.QueryRow(` WITH selected_data AS ( - SELECT url, headers, raw_size - FROM market_offline_urls - WHERE uuid = $1 + SELECT url, headers, raw_size + FROM market_offline_urls + WHERE uuid = $1 + ), + updated_pipeline AS ( + UPDATE market_mk12_deal_pipeline + SET url = selected_data.url, + headers = selected_data.headers, + raw_size = selected_data.raw_size, + started = TRUE + FROM selected_data + WHERE market_mk12_deal_pipeline.uuid = $1 + RETURNING uuid + ), + updated_deals AS ( + UPDATE market_mk12_deals + SET raw_size = selected_data.raw_size + FROM selected_data + WHERE market_mk12_deals.uuid = $1 + RETURNING uuid + ), + updated_direct_deals AS ( + UPDATE market_direct_deals + SET raw_size = selected_data.raw_size + FROM selected_data + WHERE market_direct_deals.uuid = $1 + RETURNING uuid ) - UPDATE market_mk12_deal_pipeline - SET url = selected_data.url, - headers = selected_data.headers, - raw_size = selected_data.raw_size, - started = TRUE - FROM selected_data - WHERE market_mk12_deal_pipeline.uuid = $1 - RETURNING CASE - WHEN EXISTS (SELECT 1 FROM selected_data) THEN TRUE - ELSE FALSE - END;`, deal).Scan(&updated) + SELECT + (EXISTS (SELECT 1 FROM selected_data)) AS updated;`, deal).Scan(&updated) if err != nil { if !errors.Is(err, pgx.ErrNoRows) { return false, xerrors.Errorf("failed to update the pipeline for deal %s: %w", deal, err) @@ -570,6 +589,16 @@ func (d *CurioStorageDealMarket) findURLForOfflineDeals(ctx context.Context, dea return false, xerrors.Errorf("store url for piece %s: updating pipeline: %w", pcid, err) } + _, err = tx.Exec(`UPDATE market_mk12_deals SET raw_size = $1 WHERE uuid = $2`, rawSize, deal) + if err != nil { + return false, xerrors.Errorf("store url for piece %s: updating deals: %w", pcid, err) + } + + _, err = tx.Exec(`UPDATE market_direct_deals SET raw_size = $1 WHERE uuid = $2`, rawSize, deal) + if err != nil { + return false, xerrors.Errorf("store url for piece %s: updating direct deals: %w", pcid, err) + } + return true, nil } return false, nil @@ -694,10 +723,10 @@ func (d *CurioStorageDealMarket) ingestDeal(ctx context.Context, deal MK12Pipeli StartEpoch: abi.ChainEpoch(dbdeal.StartEpoch), EndEpoch: abi.ChainEpoch(dbdeal.EndEpoch), }, - PieceActivationManifest: &miner.PieceActivationManifest{ + PieceActivationManifest: &lminer.PieceActivationManifest{ CID: pcid, Size: abi.PaddedPieceSize(dbdeal.PieceSize), - VerifiedAllocationKey: &miner.VerifiedAllocationKey{ + VerifiedAllocationKey: &lminer.VerifiedAllocationKey{ Client: abi.ActorID(clientId), ID: verifreg.AllocationId(dbdeal.AllocationID), }, diff --git a/tasks/storage-market/task_aggregation.go b/tasks/storage-market/task_aggregation.go new file mode 100644 index 000000000..ec86eadf0 --- /dev/null +++ b/tasks/storage-market/task_aggregation.go @@ -0,0 +1,379 @@ +package storage_market + +import ( + "context" + "errors" + "fmt" + "io" + "math/bits" + "net/url" + "strconv" + + "github.com/ipfs/go-cid" + "github.com/oklog/ulid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-data-segment/datasegment" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/paths" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/market/mk20" +) + +type AggregateDealTask struct { + sm *CurioStorageDealMarket + db *harmonydb.DB + sc *ffi.SealCalls + stor paths.StashStore + api headAPI +} + +func NewAggregateTask(sm *CurioStorageDealMarket, db *harmonydb.DB, sc *ffi.SealCalls, stor paths.StashStore, api headAPI) *AggregateDealTask { + return &AggregateDealTask{ + sm: sm, + db: db, + sc: sc, + stor: stor, + api: api, + } +} + +func (a *AggregateDealTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var pieces []struct { + Pcid string `db:"piece_cid"` + Psize int64 `db:"piece_size"` + RawSize int64 `db:"raw_size"` + URL string `db:"url"` + ID string `db:"id"` + SpID int64 `db:"sp_id"` + AggrIndex int `db:"aggr_index"` + Aggregated bool `db:"aggregated"` + Aggregation int `db:"deal_aggregation"` + } + + err = a.db.Select(ctx, &pieces, ` + SELECT + piece_cid, + piece_size, + raw_size, + url, + id, + sp_id, + aggr_index, + aggregated, + deal_aggregation + FROM + market_mk20_pipeline + WHERE + agg_task_id = $1 ORDER BY aggr_index ASC`, taskID) + if err != nil { + return false, xerrors.Errorf("getting piece details: %w", err) + } + + if len(pieces) == 0 { + return false, xerrors.Errorf("no pieces to aggregate for task %d", taskID) + } + + if len(pieces) == 1 { + n, err := a.db.Exec(ctx, `UPDATE market_mk20_pipeline SET aggregated = TRUE, agg_task_id = NULL + WHERE id = $1 + AND agg_task_id = $2`, pieces[0].ID, taskID) + if err != nil { + return false, xerrors.Errorf("updating aggregated piece details in DB: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row updated, got %d", n) + } + log.Infof("skipping aggregation as deal %s only has 1 piece for task %s", pieces[0].ID, taskID) + return true, nil + } + + id := pieces[0].ID + spid := pieces[0].SpID + + ID, err := ulid.Parse(id) + if err != nil { + return false, xerrors.Errorf("parsing deal ID: %w", err) + } + + deal, err := mk20.DealFromDB(ctx, a.db, ID) + if err != nil { + return false, xerrors.Errorf("getting deal details from DB: %w", err) + } + + pi, err := deal.PieceInfo() + if err != nil { + return false, xerrors.Errorf("getting piece info: %w", err) + } + + var pinfos []abi.PieceInfo + var readers []io.Reader + + var refIDs []int64 + + for _, piece := range pieces { + if piece.Aggregated { + return false, xerrors.Errorf("piece %s for deal %s already aggregated for task %d", piece.Pcid, piece.ID, taskID) + } + if piece.Aggregation != 1 { + return false, xerrors.Errorf("incorrect aggregation value for piece %s for deal %s for task %d", piece.Pcid, piece.ID, taskID) + } + if piece.ID != id || piece.SpID != spid { + return false, xerrors.Errorf("piece details do not match") + } + goUrl, err := url.Parse(piece.URL) + if err != nil { + return false, xerrors.Errorf("parsing data URL: %w", err) + } + if goUrl.Scheme != "pieceref" { + return false, xerrors.Errorf("invalid data URL scheme: %s", goUrl.Scheme) + } + + var reader io.Reader // io.ReadCloser is not supported by padreader + var closer io.Closer + + refNum, err := strconv.ParseInt(goUrl.Opaque, 10, 64) + if err != nil { + return false, xerrors.Errorf("parsing piece reference number: %w", err) + } + + // get pieceID + var pieceID []struct { + PieceID storiface.PieceNumber `db:"piece_id"` + } + err = a.db.Select(ctx, &pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, refNum) + if err != nil { + return false, xerrors.Errorf("getting pieceID: %w", err) + } + + if len(pieceID) != 1 { + return false, xerrors.Errorf("expected 1 pieceID, got %d", len(pieceID)) + } + + pr, err := a.sc.PieceReader(ctx, pieceID[0].PieceID) + if err != nil { + return false, xerrors.Errorf("getting piece reader: %w", err) + } + + closer = pr + reader = pr + defer func() { + _ = closer.Close() + }() + + pcid, err := cid.Parse(piece.Pcid) + if err != nil { + return false, xerrors.Errorf("parsing piece cid: %w", err) + } + + pinfos = append(pinfos, abi.PieceInfo{ + Size: abi.PaddedPieceSize(piece.Psize), + PieceCID: pcid, + }) + + readers = append(readers, io.LimitReader(reader, piece.RawSize)) + refIDs = append(refIDs, refNum) + } + + _, aggregatedRawSize, err := datasegment.ComputeDealPlacement(pinfos) + if err != nil { + return false, xerrors.Errorf("computing aggregated piece size: %w", err) + } + + overallSize := abi.PaddedPieceSize(aggregatedRawSize) + // we need to make this the 'next' power of 2 in order to have space for the index + next := 1 << (64 - bits.LeadingZeros64(uint64(overallSize+256))) + + aggr, err := datasegment.NewAggregate(abi.PaddedPieceSize(next), pinfos) + if err != nil { + return false, xerrors.Errorf("creating aggregate: %w", err) + } + + outR, err := aggr.AggregateObjectReader(readers) + if err != nil { + return false, xerrors.Errorf("aggregating piece readers: %w", err) + } + + var parkedPieceID, pieceRefID int64 + var pieceParked bool + + comm, err := a.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + // TODO: Review this logic for pieces which are not complete + // Check if we already have the piece, if found then verify access and skip rest of the processing + var pid int64 + err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1 AND piece_padded_size = $2 AND long_term = TRUE`, pi.PieceCIDV1.String(), pi.Size).Scan(&pid) + if err == nil { + // If piece exists then check if we can access the data + pr, err := a.sc.PieceReader(ctx, storiface.PieceNumber(pid)) + if err != nil { + // If piece does not exist then we will park it otherwise fail here + if !errors.Is(err, storiface.ErrSectorNotFound) { + // We should fail here because any subsequent operation which requires access to data will also fail + // till this error is fixed + return false, fmt.Errorf("failed to get piece reader: %w", err) + } + } + defer func() { + _ = pr.Close() + }() + pieceParked = true + parkedPieceID = pid + } else { + if !errors.Is(err, pgx.ErrNoRows) { + return false, fmt.Errorf("failed to check if piece already exists: %w", err) + } + // If piece does not exist then let's create one + err = tx.QueryRow(` + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size, long_term, skip) + VALUES ($1, $2, $3, TRUE, TRUE) RETURNING id`, + pi.PieceCIDV1.String(), pi.Size, pi.RawSize).Scan(&parkedPieceID) + if err != nil { + return false, fmt.Errorf("failed to create parked_pieces entry: %w", err) + } + } + + err = tx.QueryRow(` + INSERT INTO parked_piece_refs (piece_id, data_url, long_term) + VALUES ($1, $2, TRUE) RETURNING ref_id + `, parkedPieceID, "/Aggregate").Scan(&pieceRefID) + if err != nil { + return false, fmt.Errorf("failed to create parked_piece_refs entry: %w", err) + } + + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("saving aggregated chunk details to DB: %w", err) + } + + if !comm { + return false, xerrors.Errorf("failed to commit the transaction") + } + + failed := true + + // Clean up piece park tables in case of failure + // TODO: Figure out if there is a race condition with cleanup task + defer func() { + if failed { + _, ferr := a.db.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = $1`, pieceRefID) + if ferr != nil { + log.Errorf("failed to delete parked_piece_refs entry: %w", ferr) + } + } + }() + + // Write piece if not already complete + if !pieceParked { + upi, _, err := a.sc.WriteUploadPiece(ctx, storiface.PieceNumber(parkedPieceID), int64(pi.RawSize), outR, storiface.PathStorage, true) + if err != nil { + return false, xerrors.Errorf("writing aggregated piece data to storage: %w", err) + } + + if !upi.PieceCID.Equals(pi.PieceCIDV1) { + return false, xerrors.Errorf("commP mismatch calculated %s and supplied %s", upi.PieceCID.String(), pi.PieceCIDV1.String()) + } + + if upi.Size != pi.Size { + return false, xerrors.Errorf("commP size mismatch calculated %d and supplied %d", upi.Size, pi.Size) + } + } + + comm, err = a.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + pieceIDUrl := url.URL{ + Scheme: "pieceref", + Opaque: fmt.Sprintf("%d", pieceRefID), + } + + // Replace the pipeline piece with a new aggregated piece + _, err = tx.Exec(`DELETE FROM market_mk20_pipeline WHERE id = $1`, id) + if err != nil { + return false, fmt.Errorf("failed to delete pipeline pieces: %w", err) + } + + _, err = tx.Exec(`DELETE FROM parked_piece_refs WHERE ref_id = ANY($1) AND long_term = FALSE`, refIDs) + if err != nil { + return false, fmt.Errorf("failed to delete parked_piece_refs entries: %w", err) + } + + _, err = tx.Exec(`UPDATE parked_pieces SET complete = true WHERE id = $1 AND complete = FALSE`, parkedPieceID) + if err != nil { + return false, fmt.Errorf("failed to mark piece as complete: %w", err) + } + + var rev mk20.RetrievalV1 + if deal.Products.RetrievalV1 != nil { + rev = *deal.Products.RetrievalV1 + } + + ddo := deal.Products.DDOV1 + data := deal.Data + + var allocationID interface{} + if ddo.AllocationId != nil { + allocationID = *ddo.AllocationId + } else { + allocationID = nil + } + + n, err := tx.Exec(`INSERT INTO market_mk20_pipeline ( + id, sp_id, contract, client, piece_cid_v2, piece_cid, piece_size, raw_size, url, + offline, indexing, announce, allocation_id, duration, + piece_aggregation, deal_aggregation, started, downloaded, after_commp, aggregated) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, TRUE, TRUE, TRUE, TRUE)`, + id, spid, ddo.ContractAddress, deal.Client, deal.Data.PieceCID.String(), pi.PieceCIDV1.String(), pi.Size, pi.RawSize, pieceIDUrl.String(), + false, rev.Indexing, rev.AnnouncePayload, allocationID, ddo.Duration, + data.Format.Aggregate.Type, data.Format.Aggregate.Type) + if err != nil { + return false, xerrors.Errorf("inserting aggregated piece in mk20 pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("inserting aggregated piece in mk20 pipeline: %d rows affected", n) + } + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("saving aggregated piece details to DB: %w", err) + } + + if !comm { + return false, xerrors.Errorf("failed to commit the transaction") + } + + failed = false + + return true, nil +} + +func (a *AggregateDealTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + // If no local pieceRef was found then just return first TaskID + return &ids[0], nil +} + +func (a *AggregateDealTask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(50), + Name: "AggregateDeals", + Cost: resources.Resources{ + Cpu: 1, + Ram: 4 << 30, + }, + MaxFailures: 3, + } +} + +func (a *AggregateDealTask) Adder(taskFunc harmonytask.AddTaskFunc) { + a.sm.adders[pollerAggregate].Set(taskFunc) +} + +var _ = harmonytask.Reg(&AggregateDealTask{}) +var _ harmonytask.TaskInterface = &AggregateDealTask{} diff --git a/tasks/storage-market/task_commp.go b/tasks/storage-market/task_commp.go index 5a518011b..c5b8607cd 100644 --- a/tasks/storage-market/task_commp.go +++ b/tasks/storage-market/task_commp.go @@ -50,32 +50,64 @@ func (c *CommpTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done ctx := context.Background() var pieces []struct { - Pcid string `db:"piece_cid"` - Psize int64 `db:"piece_size"` - UUID string `db:"uuid"` - URL *string `db:"url"` - Headers json.RawMessage `db:"headers"` - RawSize int64 `db:"raw_size"` + Pcid string `db:"piece_cid"` + Psize int64 `db:"piece_size"` + RawSize int64 `db:"raw_size"` + URL *string `db:"url"` + Headers json.RawMessage `db:"headers"` + ID string `db:"id"` + SpID int64 `db:"sp_id"` + MK12Piece bool `db:"mk12_source_table"` + AggrIndex int64 `db:"aggr_index"` } - err = c.db.Select(ctx, &pieces, `SELECT uuid, url, headers, raw_size, piece_cid, piece_size - FROM market_mk12_deal_pipeline WHERE commp_task_id = $1`, taskID) - + err = c.db.Select(ctx, &pieces, `SELECT + uuid AS id, + url, + headers, + raw_size, + piece_cid, + piece_size, + sp_id, + 0 AS aggr_index, + TRUE AS mk12_source_table + FROM + market_mk12_deal_pipeline + WHERE + commp_task_id = $1 + + UNION ALL + + SELECT + id, + url, + NULL AS headers, + raw_size, + piece_cid, + piece_size, + sp_id, + aggr_index, + FALSE AS mk12_source_table + FROM + market_mk20_pipeline + WHERE + commp_task_id = $1`, taskID) if err != nil { return false, xerrors.Errorf("getting piece details: %w", err) } - if len(pieces) != 1 { return false, xerrors.Errorf("expected 1 piece, got %d", len(pieces)) } piece := pieces[0] - expired, err := checkExpiry(ctx, c.db, c.api, piece.UUID, c.sm.pin.GetExpectedSealDuration()) - if err != nil { - return false, xerrors.Errorf("deal %s expired: %w", piece.UUID, err) - } - if expired { - return true, nil + if piece.MK12Piece { + expired, err := checkExpiry(ctx, c.db, c.api, piece.ID, c.sm.pin.GetExpectedSealDuration()) + if err != nil { + return false, xerrors.Errorf("deal %s expired: %w", piece.ID, err) + } + if expired { + return true, nil + } } if piece.URL != nil { @@ -196,7 +228,24 @@ func (c *CommpTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done return false, xerrors.Errorf("commP mismatch calculated %s and supplied %s", pcid, calculatedCommp.PieceCID) } - n, err := c.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET after_commp = TRUE, psd_wait_time = NOW(), commp_task_id = NULL WHERE commp_task_id = $1`, taskID) + var n int + + if piece.MK12Piece { + n, err = c.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET after_commp = TRUE, psd_wait_time = NOW(), commp_task_id = NULL WHERE commp_task_id = $1`, taskID) + } else { + n, err = c.db.Exec(ctx, `UPDATE market_mk20_pipeline SET after_commp = TRUE, commp_task_id = NULL + WHERE id = $1 + AND sp_id = $2 + AND piece_cid = $3 + AND piece_size = $4 + AND raw_size = $5 + AND aggr_index = $6 + AND downloaded = TRUE + AND after_commp = FALSE + AND commp_task_id = $7`, + piece.ID, piece.SpID, piece.Pcid, piece.Psize, piece.RawSize, piece.AggrIndex, taskID) + } + if err != nil { return false, xerrors.Errorf("store commp success: updating deal pipeline: %w", err) } @@ -207,7 +256,11 @@ func (c *CommpTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done return true, nil } - return false, xerrors.Errorf("failed to find URL for the piece %s in the db", piece.Pcid) + if piece.MK12Piece { + return false, xerrors.Errorf("failed to find URL for the piece %s in the db", piece.Pcid) + } + + return false, xerrors.Errorf("failed to find URL for the mk20 deal piece with id %s, SP %d, CID %s, Size %d and Index %d in the db", piece.ID, piece.SpID, piece.Pcid, piece.Psize, piece.AggrIndex) } @@ -218,20 +271,12 @@ func (c *CommpTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.Task // ParkPiece should be scheduled on same node which has the piece // Remote HTTP ones can be scheduled on any node - if true { - // TODO make this a setting - id := ids[0] - return &id, nil - } - ctx := context.Background() var tasks []struct { - TaskID harmonytask.TaskID `db:"commp_task_id"` - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - StorageID string `db:"storage_id"` - Url *string `db:"url"` + TaskID harmonytask.TaskID `db:"commp_task_id"` + StorageID string `db:"storage_id"` + Url *string `db:"url"` } indIDs := make([]int64, len(ids)) @@ -240,9 +285,24 @@ func (c *CommpTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.Task } comm, err := c.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - err = tx.Select(&tasks, ` - SELECT commp_task_id, sp_id, sector_number, url FROM market_mk12_deal_pipeline - WHERE commp_task_id = ANY ($1)`, indIDs) + err = tx.Select(&tasks, ` SELECT + commp_task_id, + url + FROM + market_mk12_deal_pipeline + WHERE + commp_task_id = ANY ($1) + + UNION ALL + + SELECT + commp_task_id, + url + FROM + market_mk20_pipeline + WHERE + commp_task_id = ANY ($1); + `, indIDs) if err != nil { return false, xerrors.Errorf("failed to get deal details from DB: %w", err) } @@ -276,7 +336,7 @@ func (c *CommpTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.Task err = tx.QueryRow(` SELECT storage_id FROM sector_location - WHERE miner_id = $1 AND sector_num = $2 AND l.sector_filetype = 32`, task.SpID, pieceID[0].PieceID).Scan(&sLocation) + WHERE miner_id = 0 AND sector_num = $1 AND sector_filetype = 32`, pieceID[0].PieceID).Scan(&sLocation) if err != nil { return false, xerrors.Errorf("failed to get storage location from DB: %w", err) @@ -286,6 +346,7 @@ func (c *CommpTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.Task } } } + return true, nil }, harmonydb.OptionRetry()) diff --git a/web/api/webrpc/actor_charts.go b/web/api/webrpc/actor_charts.go index 4e136b870..42adeae76 100644 --- a/web/api/webrpc/actor_charts.go +++ b/web/api/webrpc/actor_charts.go @@ -2,6 +2,7 @@ package webrpc import ( "context" + "fmt" "sort" "github.com/samber/lo" @@ -29,14 +30,16 @@ type SectorBucket struct { } type SectorBuckets struct { - All []SectorBucket - CC []SectorBucket + All []SectorBucket + CC []SectorBucket + BlockDelaySeconds int } func (a *WebRPC) ActorCharts(ctx context.Context, maddr address.Address) (*SectorBuckets, error) { out := SectorBuckets{ - All: []SectorBucket{}, - CC: []SectorBucket{}, + All: []SectorBucket{}, + CC: []SectorBucket{}, + BlockDelaySeconds: int(build.BlockDelaySecs), } stor := store.ActorStore(ctx, @@ -106,8 +109,13 @@ func (a *WebRPC) ActorCharts(ctx context.Context, maddr address.Address) (*Secto if sector.VerifiedDealWeight.GreaterThan(abi.NewStoragePower(0)) { weight = big.Div(big.Mul(sector.VerifiedDealWeight, big.NewInt(verifiedPowerGainMul)), big.NewInt(int64(sector.Expiration-sector.PowerBaseEpoch))) } + + fmt.Println("Sector Number", sector.SectorNumber, "Weight", weight) + sb.QAP = big.Add(sb.QAP, weight) + fmt.Println("Sector Number", sector.SectorNumber, "QAP", sb.QAP) + if sector.DealWeight.Equals(abi.NewStoragePower(0)) && sector.VerifiedDealWeight.Equals(abi.NewStoragePower(0)) { sbc, ok := bucketsMapCC[bucket] if !ok { @@ -162,7 +170,7 @@ func (a *WebRPC) prepExpirationBucket(out []SectorBucket, now *types.TipSet) ([] totalCount := lo.Reduce(out, func(acc int64, b SectorBucket, _ int) int64 { return acc + b.Count }, int64(0)) - totalPower := lo.Reduce(out, func(agg big.Int, b SectorBucket, _ int) big.Int { return big.Add(agg, b.QAP) }, big.Zero()) + //totalPower := lo.Reduce(out, func(agg big.Int, b SectorBucket, _ int) big.Int { return big.Add(agg, b.QAP) }, big.Zero()) if len(out) == 0 { return out, nil @@ -179,13 +187,15 @@ func (a *WebRPC) prepExpirationBucket(out []SectorBucket, now *types.TipSet) ([] } for i := range out { + fmt.Println("Bucket", i, "Epoch", out[i].BucketEpoch, "Count", out[i].Count, "QAP", out[i].QAP, "VestedLockedFunds", out[i].VestedLockedFunds) newTotal := totalCount - out[i].Count out[i].Count = newTotal totalCount = newTotal - newTotalPower := big.Sub(totalPower, out[i].QAP) - out[i].QAP = newTotalPower - totalPower = newTotalPower + //newTotalPower := big.Sub(totalPower, out[i].QAP) + //fmt.Println("Bucket", i, "New Total Power", newTotalPower.String()) + //out[i].QAP = newTotalPower + //totalPower = newTotalPower epochsToExpiry := out[i].BucketEpoch - now.Height() secsToExpiry := int64(epochsToExpiry) * int64(build.BlockDelaySecs) diff --git a/web/api/webrpc/deals.go b/web/api/webrpc/deals.go index 2648eae75..510fa6c2a 100644 --- a/web/api/webrpc/deals.go +++ b/web/api/webrpc/deals.go @@ -4,9 +4,13 @@ import ( "context" "time" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/curio/lib/commcidv2" "github.com/filecoin-project/curio/market/storageingest" "github.com/filecoin-project/lotus/chain/types" @@ -17,18 +21,20 @@ type OpenDealInfo struct { SectorNumber uint64 `db:"sector_number"` PieceCID string `db:"piece_cid"` PieceSize uint64 `db:"piece_size"` + RawSize uint64 `db:"data_raw_size"` CreatedAt time.Time `db:"created_at"` SnapDeals bool `db:"is_snap"` PieceSizeStr string `db:"-"` CreatedAtStr string `db:"-"` + PieceCidV2 string `db:"-"` Miner string } func (a *WebRPC) DealsPending(ctx context.Context) ([]OpenDealInfo, error) { deals := []OpenDealInfo{} - err := a.deps.DB.Select(ctx, &deals, `SELECT sp_id, sector_number, piece_cid, piece_size, created_at, is_snap FROM open_sector_pieces ORDER BY created_at DESC`) + err := a.deps.DB.Select(ctx, &deals, `SELECT sp_id, sector_number, piece_cid, piece_size, data_raw_size, created_at, is_snap FROM open_sector_pieces ORDER BY created_at DESC`) if err != nil { return nil, err } @@ -41,6 +47,15 @@ func (a *WebRPC) DealsPending(ctx context.Context) ([]OpenDealInfo, error) { return nil, err } deals[i].Miner = maddr.String() + pcid, err := cid.Parse(deals[i].PieceCID) + if err != nil { + return nil, xerrors.Errorf("failed to parse piece cid: %w", err) + } + pcid2, err := commcidv2.PieceCidV2FromV1(pcid, deals[i].RawSize) + if err != nil { + return nil, xerrors.Errorf("failed to get commp: %w", err) + } + deals[i].PieceCidV2 = pcid2.String() } return deals, nil diff --git a/web/api/webrpc/ipni.go b/web/api/webrpc/ipni.go index 9ae0e1f3c..004a390db 100644 --- a/web/api/webrpc/ipni.go +++ b/web/api/webrpc/ipni.go @@ -12,10 +12,15 @@ import ( "time" "github.com/ipfs/go-cid" + "github.com/samber/lo" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/lib/commcidv2" + itype "github.com/filecoin-project/curio/market/ipni/types" + "github.com/filecoin-project/curio/market/mk20" ) type IpniAd struct { @@ -36,7 +41,8 @@ type IpniAd struct { EntryCount int64 `json:"entry_count"` CIDCount int64 `json:"cid_count"` - AdCids []string `db:"-" json:"ad_cids"` + AdCids []string `db:"-" json:"ad_cids"` + PieceCidV2 string `db:"-" json:"piece_cid_v2"` } func (a *WebRPC) GetAd(ctx context.Context, ad string) (*IpniAd, error) { @@ -90,21 +96,58 @@ func (a *WebRPC) GetAd(ctx context.Context, ad string) (*IpniAd, error) { details := ads[0] - var pi abi.PieceInfo - err = pi.UnmarshalCBOR(bytes.NewReader(details.ContextID)) - if err != nil { - return nil, xerrors.Errorf("failed to unmarshal piece info: %w", err) + var pcid, pcid2 cid.Cid + var psize int64 + + if details.SpID == -1 { + var pi itype.PdpIpniContext + err = pi.Unmarshal(details.ContextID) + if err != nil { + return nil, xerrors.Errorf("failed to unmarshal PDP piece info: %w", err) + } + pcid2 = pi.PieceCID + pInfo, err := mk20.GetPieceInfo(pcid2) + if err != nil { + return nil, xerrors.Errorf("failed to get piece info: %w", err) + } + pcid = pInfo.PieceCIDV1 + psize = int64(pInfo.Size) + } else { + var pi abi.PieceInfo + err = pi.UnmarshalCBOR(bytes.NewReader(details.ContextID)) + if err != nil { + return nil, xerrors.Errorf("failed to unmarshal piece info: %w", err) + } + + pcid = pi.PieceCID + psize = int64(pi.Size) + + // Get RawSize from market_piece_deal to calculate PieceCidV2 + var rawSize uint64 + err = a.deps.DB.QueryRow(ctx, `SELECT raw_size FROM market_piece_deal WHERE piece_cid = $1 AND piece_length = $2 LIMIT 1;`, pi.PieceCID, pi.Size).Scan(&rawSize) + if err != nil { + return nil, xerrors.Errorf("failed to get raw size: %w", err) + } + + pcid2, err = commcidv2.PieceCidV2FromV1(pi.PieceCID, rawSize) + if err != nil { + return nil, xerrors.Errorf("failed to get commp: %w", err) + } } - details.PieceCid = pi.PieceCID.String() - size := int64(pi.Size) - details.PieceSize = size + details.PieceCid = pcid.String() + details.PieceSize = psize + details.PieceCidV2 = pcid2.String() - maddr, err := address.NewIDAddress(uint64(details.SpID)) - if err != nil { - return nil, err + if details.SpID == -1 { + details.Miner = "PDP" + } else { + maddr, err := address.NewIDAddress(uint64(details.SpID)) + if err != nil { + return nil, err + } + details.Miner = maddr.String() } - details.Miner = maddr.String() if !details.PreviousAd.Valid { details.Previous = "" @@ -123,7 +166,7 @@ func (a *WebRPC) GetAd(ctx context.Context, ad string) (*IpniAd, error) { CIDCount int64 `db:"cid_count"` } - err = a.deps.DB.Select(ctx, &adEntryInfo, `SELECT count(1) as entry_count, sum(num_blocks) as cid_count from ipni_chunks where piece_cid=$1`, details.PieceCid) + err = a.deps.DB.Select(ctx, &adEntryInfo, `SELECT count(1) as entry_count, sum(num_blocks) as cid_count from ipni_chunks where piece_cid=$1`, details.PieceCidV2) if err != nil { return nil, xerrors.Errorf("failed to fetch the ad entry count from DB: %w", err) } @@ -190,11 +233,15 @@ func (a *WebRPC) IPNISummary(ctx context.Context) ([]*IPNI, error) { } for i := range summary { - maddr, err := address.NewIDAddress(uint64(summary[i].SpId)) - if err != nil { - return nil, fmt.Errorf("failed to convert ID address: %w", err) + if summary[i].SpId == -1 { + summary[i].Miner = "PDP" + } else { + maddr, err := address.NewIDAddress(uint64(summary[i].SpId)) + if err != nil { + return nil, fmt.Errorf("failed to convert ID address: %w", err) + } + summary[i].Miner = maddr.String() } - summary[i].Miner = maddr.String() } type minimalIpniInfo struct { @@ -222,7 +269,7 @@ func (a *WebRPC) IPNISummary(ctx context.Context) ([]*IPNI, error) { return nil, fmt.Errorf("failed to fetch IPNI configuration: %w", err) } - for _, service := range services { + for _, service := range lo.Uniq(services) { for _, d := range summary { url := service + "/providers/" + d.PeerID resp, err := http.Get(url) diff --git a/web/api/webrpc/market.go b/web/api/webrpc/market.go index d496c0ef3..0b1e33b9c 100644 --- a/web/api/webrpc/market.go +++ b/web/api/webrpc/market.go @@ -5,6 +5,7 @@ import ( "context" "database/sql" "encoding/json" + "errors" "fmt" "net/http" "strconv" @@ -13,6 +14,7 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-cid" + "github.com/oklog/ulid" "github.com/samber/lo" "github.com/snadrus/must" "github.com/yugabyte/pgx/v5" @@ -23,6 +25,9 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/commcidv2" + itype "github.com/filecoin-project/curio/market/ipni/types" + "github.com/filecoin-project/curio/market/mk20" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors" @@ -88,32 +93,33 @@ func (a *WebRPC) SetStorageAsk(ctx context.Context, ask *StorageAsk) error { } type MK12Pipeline struct { - UUID string `db:"uuid" json:"uuid"` - SpID int64 `db:"sp_id" json:"sp_id"` - Started bool `db:"started" json:"started"` - PieceCid string `db:"piece_cid" json:"piece_cid"` - PieceSize int64 `db:"piece_size" json:"piece_size"` - RawSize *int64 `db:"raw_size" json:"raw_size"` - Offline bool `db:"offline" json:"offline"` - URL *string `db:"url" json:"url"` - Headers []byte `db:"headers" json:"headers"` - CommTaskID *int64 `db:"commp_task_id" json:"commp_task_id"` - AfterCommp bool `db:"after_commp" json:"after_commp"` - PSDTaskID *int64 `db:"psd_task_id" json:"psd_task_id"` - AfterPSD bool `db:"after_psd" json:"after_psd"` - PSDWaitTime *time.Time `db:"psd_wait_time" json:"psd_wait_time"` - FindDealTaskID *int64 `db:"find_deal_task_id" json:"find_deal_task_id"` - AfterFindDeal bool `db:"after_find_deal" json:"after_find_deal"` - Sector *int64 `db:"sector" json:"sector"` - Offset *int64 `db:"sector_offset" json:"sector_offset"` - CreatedAt time.Time `db:"created_at" json:"created_at"` - Indexed bool `db:"indexed" json:"indexed"` - Announce bool `db:"announce" json:"announce"` - Complete bool `db:"complete" json:"complete"` - Miner string `json:"miner"` + UUID string `db:"uuid" json:"uuid"` + SpID int64 `db:"sp_id" json:"sp_id"` + Started bool `db:"started" json:"started"` + PieceCid string `db:"piece_cid" json:"piece_cid"` + PieceSize int64 `db:"piece_size" json:"piece_size"` + PieceCidV2 string `db:"-" json:"piece_cid_v2"` + RawSize sql.NullInt64 `db:"raw_size" json:"raw_size"` + Offline bool `db:"offline" json:"offline"` + URL *string `db:"url" json:"url"` + Headers []byte `db:"headers" json:"headers"` + CommTaskID *int64 `db:"commp_task_id" json:"commp_task_id"` + AfterCommp bool `db:"after_commp" json:"after_commp"` + PSDTaskID *int64 `db:"psd_task_id" json:"psd_task_id"` + AfterPSD bool `db:"after_psd" json:"after_psd"` + PSDWaitTime *time.Time `db:"psd_wait_time" json:"psd_wait_time"` + FindDealTaskID *int64 `db:"find_deal_task_id" json:"find_deal_task_id"` + AfterFindDeal bool `db:"after_find_deal" json:"after_find_deal"` + Sector *int64 `db:"sector" json:"sector"` + Offset *int64 `db:"sector_offset" json:"sector_offset"` + CreatedAt time.Time `db:"created_at" json:"created_at"` + Indexed bool `db:"indexed" json:"indexed"` + Announce bool `db:"announce" json:"announce"` + Complete bool `db:"complete" json:"complete"` + Miner string `json:"miner"` } -func (a *WebRPC) GetDealPipelines(ctx context.Context, limit int, offset int) ([]*MK12Pipeline, error) { +func (a *WebRPC) GetMK12DealPipelines(ctx context.Context, limit int, offset int) ([]*MK12Pipeline, error) { if limit <= 0 { limit = 25 } @@ -163,6 +169,17 @@ func (a *WebRPC) GetDealPipelines(ctx context.Context, limit int, offset int) ([ return nil, xerrors.Errorf("failed to parse the miner ID: %w", err) } s.Miner = addr.String() + if s.RawSize.Valid { + pcid, err := cid.Parse(s.PieceCid) + if err != nil { + return nil, xerrors.Errorf("failed to parse v1 piece CID: %w", err) + } + pcid2, err := commcidv2.PieceCidV2FromV1(pcid, uint64(s.RawSize.Int64)) + if err != nil { + return nil, xerrors.Errorf("failed to get commP from piece info: %w", err) + } + s.PieceCidV2 = pcid2.String() + } } return pipelines, nil @@ -171,7 +188,7 @@ func (a *WebRPC) GetDealPipelines(ctx context.Context, limit int, offset int) ([ type StorageDealSummary struct { ID string `db:"uuid" json:"id"` MinerID int64 `db:"sp_id" json:"sp_id"` - Sector *int64 `db:"sector_num" json:"sector"` + Sector sql.NullInt64 `db:"sector_num" json:"sector"` CreatedAt time.Time `db:"created_at" json:"created_at"` SignedProposalCid string `db:"signed_proposal_cid" json:"signed_proposal_cid"` Offline bool `db:"offline" json:"offline"` @@ -179,10 +196,11 @@ type StorageDealSummary struct { StartEpoch int64 `db:"start_epoch" json:"start_epoch"` EndEpoch int64 `db:"end_epoch" json:"end_epoch"` ClientPeerId string `db:"client_peer_id" json:"client_peer_id"` - ChainDealId *int64 `db:"chain_deal_id" json:"chain_deal_id"` - PublishCid *string `db:"publish_cid" json:"publish_cid"` + ChainDealId sql.NullInt64 `db:"chain_deal_id" json:"chain_deal_id"` + PublishCid sql.NullString `db:"publish_cid" json:"publish_cid"` PieceCid string `db:"piece_cid" json:"piece_cid"` PieceSize int64 `db:"piece_size" json:"piece_size"` + RawSize sql.NullInt64 `db:"raw_size"` FastRetrieval bool `db:"fast_retrieval" json:"fast_retrieval"` AnnounceToIpni bool `db:"announce_to_ipni" json:"announce_to_ipni"` Url sql.NullString `db:"url"` @@ -192,29 +210,18 @@ type StorageDealSummary struct { DBError sql.NullString `db:"error"` Error string `json:"error"` Miner string `json:"miner"` - IsLegacy bool `json:"is_legacy"` - Indexed *bool `db:"indexed" json:"indexed"` + Indexed sql.NullBool `db:"indexed" json:"indexed"` IsDDO bool `db:"is_ddo" json:"is_ddo"` + PieceCidV2 string `json:"piece_cid_v2"` } func (a *WebRPC) StorageDealInfo(ctx context.Context, deal string) (*StorageDealSummary, error) { - - var isLegacy bool - var pcid cid.Cid - id, err := uuid.Parse(deal) if err != nil { - p, perr := cid.Parse(deal) - if perr != nil { - return &StorageDealSummary{}, xerrors.Errorf("failed to parse the deal ID: %w and %w", err, perr) - } - isLegacy = true - pcid = p + return nil, xerrors.Errorf("failed to parse deal ID: %w", err) } - - if !isLegacy { - var summaries []StorageDealSummary - err = a.deps.DB.Select(ctx, &summaries, `SELECT + var summaries []StorageDealSummary + err = a.deps.DB.Select(ctx, &summaries, `SELECT deal.uuid, deal.sp_id, deal.created_at, @@ -228,6 +235,7 @@ func (a *WebRPC) StorageDealInfo(ctx context.Context, deal string) (*StorageDeal deal.publish_cid, deal.piece_cid, deal.piece_size, + deal.raw_size, deal.fast_retrieval, deal.announce_to_ipni, deal.url, @@ -252,6 +260,7 @@ func (a *WebRPC) StorageDealInfo(ctx context.Context, deal string) (*StorageDeal md.publish_cid, md.piece_cid, md.piece_size, + md.raw_size, md.fast_retrieval, md.announce_to_ipni, md.url, @@ -278,6 +287,7 @@ func (a *WebRPC) StorageDealInfo(ctx context.Context, deal string) (*StorageDeal '' AS publish_cid, mdd.piece_cid, mdd.piece_size, + mdd.raw_size, mdd.fast_retrieval, mdd.announce_to_ipni, '' AS url, @@ -290,106 +300,74 @@ func (a *WebRPC) StorageDealInfo(ctx context.Context, deal string) (*StorageDeal LEFT JOIN market_piece_deal mpd ON mpd.id = deal.uuid AND mpd.sp_id = deal.sp_id LEFT JOIN market_piece_metadata mpm - ON mpm.piece_cid = deal.piece_cid; + ON mpm.piece_cid = deal.piece_cid AND mpm.piece_size = deal.piece_size; `, id.String()) - if err != nil { - return &StorageDealSummary{}, xerrors.Errorf("select deal summary: %w", err) - } - - if len(summaries) == 0 { - return nil, xerrors.Errorf("No such deal found in database: %s", id.String()) - } - - d := summaries[0] - d.IsLegacy = isLegacy - - addr, err := address.NewIDAddress(uint64(d.MinerID)) - if err != nil { - return &StorageDealSummary{}, err - } - - if d.Header != nil { - var h http.Header - err = json.Unmarshal(d.Header, &h) - if err != nil { - return &StorageDealSummary{}, err - } - d.UrlHeaders = h - } - - if !d.Url.Valid { - d.URLS = "" - } else { - d.URLS = d.Url.String - } - - if !d.DBError.Valid { - d.Error = "" - } else { - d.Error = d.DBError.String - } - - d.Miner = addr.String() - - return &d, nil - } - - var summaries []StorageDealSummary - err = a.deps.DB.Select(ctx, &summaries, `SELECT - '' AS uuid, - sp_id, - created_at, - signed_proposal_cid, - FALSE as offline, - verified, - start_epoch, - end_epoch, - client_peer_id, - chain_deal_id, - publish_cid, - piece_cid, - piece_size, - fast_retrieval, - FALSE AS announce_to_ipni, - '' AS url, - '{}' AS url_headers, - '' AS error, - sector_num, - FALSE AS indexed - FROM market_legacy_deals - WHERE signed_proposal_cid = $1`, pcid.String()) - if err != nil { - return &StorageDealSummary{}, err + return &StorageDealSummary{}, xerrors.Errorf("select deal summary: %w", err) } if len(summaries) == 0 { - return nil, xerrors.Errorf("No such deal found in database :%s", pcid.String()) + return nil, xerrors.Errorf("No such deal found in database: %s", id.String()) } d := summaries[0] - d.IsLegacy = isLegacy addr, err := address.NewIDAddress(uint64(d.MinerID)) if err != nil { return &StorageDealSummary{}, err } + if d.Header != nil { + var h http.Header + err = json.Unmarshal(d.Header, &h) + if err != nil { + return &StorageDealSummary{}, err + } + d.UrlHeaders = h + } + + if !d.Url.Valid { + d.URLS = "" + } else { + d.URLS = d.Url.String + } + + if !d.DBError.Valid { + d.Error = "" + } else { + d.Error = d.DBError.String + } + d.Miner = addr.String() + if d.RawSize.Valid { + pcid, err := cid.Parse(d.PieceCid) + if err != nil { + return &StorageDealSummary{}, xerrors.Errorf("failed to parse piece CID: %w", err) + } + pcid2, err := commcidv2.PieceCidV2FromV1(pcid, uint64(d.RawSize.Int64)) + if err != nil { + return &StorageDealSummary{}, xerrors.Errorf("failed to get commP from piece info: %w", err) + } + d.PieceCidV2 = pcid2.String() + } + return &d, nil + } type StorageDealList struct { - ID string `db:"uuid" json:"id"` - MinerID int64 `db:"sp_id" json:"sp_id"` - CreatedAt time.Time `db:"created_at" json:"created_at"` - PieceCid string `db:"piece_cid" json:"piece_cid"` - PieceSize int64 `db:"piece_size" json:"piece_size"` - Processed bool `db:"processed" json:"processed"` - Error sql.NullString `db:"error" json:"error"` - Miner string `json:"miner"` + ID string `db:"uuid" json:"id"` + MinerID int64 `db:"sp_id" json:"sp_id"` + CreatedAt time.Time `db:"created_at" json:"created_at"` + PieceCidV1 string `db:"piece_cid" json:"piece_cid"` + PieceSize int64 `db:"piece_size" json:"piece_size"` + RawSize sql.NullInt64 `db:"raw_size"` + PieceCidV2 string `json:"piece_cid_v2"` + Processed bool `db:"processed" json:"processed"` + Error sql.NullString `db:"error" json:"error"` + Miner string `json:"miner"` } func (a *WebRPC) MK12StorageDealList(ctx context.Context, limit int, offset int) ([]*StorageDealList, error) { @@ -401,6 +379,7 @@ func (a *WebRPC) MK12StorageDealList(ctx context.Context, limit int, offset int) md.created_at, md.piece_cid, md.piece_size, + md.raw_size, md.error, coalesce(mm12dp.complete, true) as processed FROM market_mk12_deals md @@ -417,37 +396,23 @@ func (a *WebRPC) MK12StorageDealList(ctx context.Context, limit int, offset int) return nil, err } mk12Summaries[i].Miner = addr.String() - } - return mk12Summaries, nil -} - -func (a *WebRPC) LegacyStorageDealList(ctx context.Context, limit int, offset int) ([]StorageDealList, error) { - var mk12Summaries []StorageDealList - - err := a.deps.DB.Select(ctx, &mk12Summaries, `SELECT - signed_proposal_cid AS uuid, - sp_id, - created_at, - piece_cid, - piece_size, - NULL AS error, - TRUE AS processed - FROM market_legacy_deals - ORDER BY created_at DESC - LIMIT $1 OFFSET $2;`, limit, offset) - if err != nil { - return nil, fmt.Errorf("failed to fetch deal list: %w", err) - } - - for i := range mk12Summaries { - addr, err := address.NewIDAddress(uint64(mk12Summaries[i].MinerID)) - if err != nil { - return nil, err + // Find PieceCidV2 only of rawSize is present + // It will be absent only for Offline deals (mk12, mk12-ddo), waiting for data + if mk12Summaries[i].RawSize.Valid { + pcid, err := cid.Parse(mk12Summaries[i].PieceCidV1) + if err != nil { + return nil, xerrors.Errorf("failed to parse v1 piece CID: %w", err) + } + pcid2, err := commcidv2.PieceCidV2FromV1(pcid, uint64(mk12Summaries[i].RawSize.Int64)) + if err != nil { + return nil, xerrors.Errorf("failed to get commP from piece info: %w", err) + } + mk12Summaries[i].PieceCidV2 = pcid2.String() } - mk12Summaries[i].Miner = addr.String() } return mk12Summaries, nil + } type WalletBalances struct { @@ -571,26 +536,28 @@ func (a *WebRPC) MoveBalanceToEscrow(ctx context.Context, miner string, amount s } type PieceDeal struct { - ID string `db:"id" json:"id"` - BoostDeal bool `db:"boost_deal" json:"boost_deal"` - LegacyDeal bool `db:"legacy_deal" json:"legacy_deal"` - SpId int64 `db:"sp_id" json:"sp_id"` - ChainDealId int64 `db:"chain_deal_id" json:"chain_deal_id"` - Sector int64 `db:"sector_num" json:"sector"` - Offset int64 `db:"piece_offset" json:"offset"` - Length int64 `db:"piece_length" json:"length"` - RawSize int64 `db:"raw_size" json:"raw_size"` - Miner string `json:"miner"` + ID string `db:"id" json:"id"` + BoostDeal bool `db:"boost_deal" json:"boost_deal"` + LegacyDeal bool `db:"legacy_deal" json:"legacy_deal"` + SpId int64 `db:"sp_id" json:"sp_id"` + ChainDealId int64 `db:"chain_deal_id" json:"chain_deal_id"` + Sector int64 `db:"sector_num" json:"sector"` + Offset sql.NullInt64 `db:"piece_offset" json:"offset"` + Length int64 `db:"piece_length" json:"length"` + RawSize int64 `db:"raw_size" json:"raw_size"` + Miner string `json:"miner"` + MK20 bool `db:"-" json:"mk20"` } type PieceInfo struct { - PieceCid string `json:"piece_cid"` - Size int64 `json:"size"` - CreatedAt time.Time `json:"created_at"` - Indexed bool `json:"indexed"` - IndexedAT time.Time `json:"indexed_at"` - IPNIAd string `json:"ipni_ad"` - Deals []*PieceDeal `json:"deals"` + PieceCidv2 string `json:"piece_cid_v2"` + PieceCid string `json:"piece_cid"` + Size int64 `json:"size"` + CreatedAt time.Time `json:"created_at"` + Indexed bool `json:"indexed"` + IndexedAT time.Time `json:"indexed_at"` + IPNIAd []string `json:"ipni_ads"` + Deals []*PieceDeal `json:"deals"` } func (a *WebRPC) PieceInfo(ctx context.Context, pieceCid string) (*PieceInfo, error) { @@ -599,10 +566,25 @@ func (a *WebRPC) PieceInfo(ctx context.Context, pieceCid string) (*PieceInfo, er return nil, err } - ret := &PieceInfo{} + if !commcidv2.IsPieceCidV2(piece) { + return nil, xerrors.Errorf("invalid piece CID V2: %w", err) + } + + commp, err := commcidv2.CommPFromPCidV2(piece) + if err != nil { + return nil, xerrors.Errorf("failed to get commP from piece CID: %w", err) + } - err = a.deps.DB.QueryRow(ctx, `SELECT created_at, indexed, indexed_at FROM market_piece_metadata WHERE piece_cid = $1`, piece.String()).Scan(&ret.CreatedAt, &ret.Indexed, &ret.IndexedAT) - if err != nil && err != pgx.ErrNoRows { + pi := commp.PieceInfo() + + ret := &PieceInfo{ + PieceCidv2: piece.String(), + PieceCid: pi.PieceCID.String(), + Size: int64(pi.Size), + } + + err = a.deps.DB.QueryRow(ctx, `SELECT created_at, indexed, indexed_at FROM market_piece_metadata WHERE piece_cid = $1 AND piece_size = $2`, pi.PieceCID.String(), pi.Size).Scan(&ret.CreatedAt, &ret.Indexed, &ret.IndexedAT) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { return nil, xerrors.Errorf("failed to get piece metadata: %w", err) } @@ -619,26 +601,27 @@ func (a *WebRPC) PieceInfo(ctx context.Context, pieceCid string) (*PieceInfo, er piece_length, raw_size FROM market_piece_deal - WHERE piece_cid = $1`, piece.String()) + WHERE piece_cid = $1 AND piece_length = $2`, pi.PieceCID.String(), pi.Size) if err != nil { return nil, xerrors.Errorf("failed to get piece deals: %w", err) } for i := range pieceDeals { - addr, err := address.NewIDAddress(uint64(pieceDeals[i].SpId)) - if err != nil { - return nil, err + if pieceDeals[i].SpId == -1 { + pieceDeals[i].Miner = "PDP" + } else { + addr, err := address.NewIDAddress(uint64(pieceDeals[i].SpId)) + if err != nil { + return nil, err + } + _, err = uuid.Parse(pieceDeals[i].ID) + if err != nil { + pieceDeals[i].MK20 = true + } + pieceDeals[i].Miner = addr.String() } - pieceDeals[i].Miner = addr.String() - ret.Size = pieceDeals[i].Length } ret.Deals = pieceDeals - ret.PieceCid = piece.String() - - pi := abi.PieceInfo{ - PieceCID: piece, - Size: abi.PaddedPieceSize(ret.Size), - } b := new(bytes.Buffer) @@ -647,14 +630,48 @@ func (a *WebRPC) PieceInfo(ctx context.Context, pieceCid string) (*PieceInfo, er return nil, xerrors.Errorf("failed to marshal piece info: %w", err) } + c1 := itype.PdpIpniContext{ + PieceCID: piece, + Payload: true, + } + + c1b, err := c1.Marshal() + if err != nil { + return nil, xerrors.Errorf("failed to marshal PDP piece info: %w", err) + } + fmt.Printf("C1B: %x", c1b) + + c2 := itype.PdpIpniContext{ + PieceCID: piece, + Payload: false, + } + + c2b, err := c2.Marshal() + if err != nil { + return nil, xerrors.Errorf("failed to marshal PDP piece info: %w", err) + } + fmt.Printf("C2B: %x", c2b) + // Get only the latest Ad var ipniAd string err = a.deps.DB.QueryRow(ctx, `SELECT ad_cid FROM ipni WHERE context_id = $1 ORDER BY order_number DESC LIMIT 1`, b.Bytes()).Scan(&ipniAd) - if err != nil && err != pgx.ErrNoRows { - return nil, xerrors.Errorf("failed to get deal ID by piece CID: %w", err) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, xerrors.Errorf("failed to get ad ID by piece CID: %w", err) + } + + var ipniAdPdp string + err = a.deps.DB.QueryRow(ctx, `SELECT ad_cid FROM ipni WHERE context_id = $1 ORDER BY order_number DESC LIMIT 1`, c1b).Scan(&ipniAdPdp) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, xerrors.Errorf("failed to get ad ID by piece CID for PDP: %w", err) + } + + var ipniAdPdp1 string + err = a.deps.DB.QueryRow(ctx, `SELECT ad_cid FROM ipni WHERE context_id = $1 ORDER BY order_number DESC LIMIT 1`, c2b).Scan(&ipniAdPdp1) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, xerrors.Errorf("failed to get ad ID by piece CID for PDP: %w", err) } - ret.IPNIAd = ipniAd + ret.IPNIAd = append(ret.IPNIAd, ipniAd, ipniAdPdp, ipniAdPdp1) return ret, nil } @@ -679,18 +696,34 @@ type ParkedPieceRef struct { // PieceParkStates retrieves the park states for a given piece CID func (a *WebRPC) PieceParkStates(ctx context.Context, pieceCID string) (*ParkedPieceState, error) { + pcid, err := cid.Parse(pieceCID) + if err != nil { + return nil, err + } + + if !commcidv2.IsPieceCidV2(pcid) { + return nil, xerrors.Errorf("invalid piece CID V2: %w", err) + } + + commp, err := commcidv2.CommPFromPCidV2(pcid) + if err != nil { + return nil, xerrors.Errorf("failed to get commP from piece CID: %w", err) + } + + pi := commp.PieceInfo() + var pps ParkedPieceState // Query the parked_pieces table - err := a.deps.DB.QueryRow(ctx, ` + err = a.deps.DB.QueryRow(ctx, ` SELECT id, created_at, piece_cid, piece_padded_size, piece_raw_size, complete, task_id, cleanup_task_id - FROM parked_pieces WHERE piece_cid = $1 - `, pieceCID).Scan( + FROM parked_pieces WHERE piece_cid = $1 AND piece_padded_size = $2 + `, pi.PieceCID.String(), pi.Size).Scan( &pps.ID, &pps.CreatedAt, &pps.PieceCID, &pps.PiecePaddedSize, &pps.PieceRawSize, &pps.Complete, &pps.TaskID, &pps.CleanupTaskID, ) if err != nil { - if err == pgx.ErrNoRows { + if errors.Is(err, pgx.ErrNoRows) { return nil, nil } return nil, fmt.Errorf("failed to query parked piece: %w", err) @@ -798,16 +831,88 @@ type MK12DealPipeline struct { CreatedAt time.Time `db:"created_at" json:"created_at"` } -// MK12DealDetailEntry combines a deal and its pipeline -type MK12DealDetailEntry struct { +// MK20DealPipeline represents a record from market_mk20_ddo_pipeline table +type MK20DDOPipeline struct { + ID string `db:"id" json:"id"` + SpId int64 `db:"sp_id" json:"sp_id"` + Contract string `db:"contract" json:"contract"` + Client string `db:"client" json:"client"` + PieceCidV2 string `db:"piece_cid_v2" json:"piece_cid_v2"` + PieceCid string `db:"piece_cid" json:"piece_cid"` + PieceSize int64 `db:"piece_size" json:"piece_size"` + RawSize uint64 `db:"raw_size" json:"raw_size"` + Offline bool `db:"offline" json:"offline"` + URL sql.NullString `db:"url" json:"url"` + Indexing bool `db:"indexing" json:"indexing"` + Announce bool `db:"announce" json:"announce"` + AllocationID sql.NullInt64 `db:"allocation_id" json:"allocation_id"` + Duration int64 `db:"duration" json:"duration"` + PieceAggregation int `db:"piece_aggregation" json:"piece_aggregation"` + + Started bool `db:"started" json:"started"` + Downloaded bool `db:"downloaded" json:"downloaded"` + + CommpTaskId sql.NullInt64 `db:"commp_task_id" json:"commp_task_id"` + AfterCommp bool `db:"after_commp" json:"after_commp"` + + DealAggregation int `db:"deal_aggregation" json:"deal_aggregation"` + AggregationIndex int64 `db:"aggr_index" json:"aggr_index"` + AggregationTaskID sql.NullInt64 `db:"agg_task_id" json:"agg_task_id"` + Aggregated bool `db:"aggregated" json:"aggregated"` + + Sector sql.NullInt64 `db:"sector" json:"sector"` + RegSealProof sql.NullInt64 `db:"reg_seal_proof" json:"reg_seal_proof"` + SectorOffset sql.NullInt64 `db:"sector_offset" json:"sector_offset"` + Sealed bool `db:"sealed" json:"sealed"` + + IndexingCreatedAt sql.NullTime `db:"indexing_created_at" json:"indexing_created_at"` + IndexingTaskId sql.NullInt64 `db:"indexing_task_id" json:"indexing_task_id"` + Indexed bool `db:"indexed" json:"indexed"` + + Complete bool `db:"complete" json:"complete"` + CreatedAt time.Time `db:"created_at" json:"created_at"` + + Miner string `db:"-" json:"miner"` +} + +type PieceInfoMK12Deals struct { Deal *MK12Deal `json:"deal"` - Pipeline *MK12DealPipeline `json:"pipeline,omitempty"` + Pipeline *MK12DealPipeline `json:"mk12_pipeline,omitempty"` +} + +type PieceInfoMK20Deals struct { + Deal *MK20StorageDeal `json:"deal"` + DDOPipeline *MK20DDOPipeline `json:"mk20_ddo_pipeline,omitempty"` + PDPPipeline *MK20PDPPipeline `json:"mk20_pdp_pipeline,omitempty"` +} + +// PieceDealDetailEntry combines a deal and its pipeline +type PieceDealDetailEntry struct { + MK12 []PieceInfoMK12Deals `json:"mk12"` + MK20 []PieceInfoMK20Deals `json:"mk20"` } -func (a *WebRPC) MK12DealDetail(ctx context.Context, pieceCid string) ([]MK12DealDetailEntry, error) { +func (a *WebRPC) PieceDealDetail(ctx context.Context, pieceCid string) (*PieceDealDetailEntry, error) { + pcid, err := cid.Parse(pieceCid) + if err != nil { + return nil, err + } + + if !commcidv2.IsPieceCidV2(pcid) { + return nil, xerrors.Errorf("invalid piece CID V2: %w", err) + } + + commp, err := commcidv2.CommPFromPCidV2(pcid) + if err != nil { + return nil, err + } + + pieceCid = commp.PieceInfo().PieceCID.String() + size := commp.PieceInfo().Size + var mk12Deals []*MK12Deal - err := a.deps.DB.Select(ctx, &mk12Deals, ` + err = a.deps.DB.Select(ctx, &mk12Deals, ` SELECT uuid, sp_id, @@ -832,7 +937,7 @@ func (a *WebRPC) MK12DealDetail(ctx context.Context, pieceCid string) ([]MK12Dea error, FALSE AS is_ddo FROM market_mk12_deals - WHERE piece_cid = $1 + WHERE piece_cid = $1 AND piece_size = $2 UNION ALL @@ -860,7 +965,7 @@ func (a *WebRPC) MK12DealDetail(ctx context.Context, pieceCid string) ([]MK12Dea NULL AS error, -- NULL handled by Go (sql.NullString) TRUE AS is_ddo FROM market_direct_deals - WHERE piece_cid = $1`, pieceCid) + WHERE piece_cid = $1 AND piece_size = $2`, pieceCid, size) if err != nil { return nil, err } @@ -909,7 +1014,7 @@ func (a *WebRPC) MK12DealDetail(ctx context.Context, pieceCid string) ([]MK12Dea WHERE uuid = ANY($1) `, uuids) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to query mk12 pipelines: %w", err) } } @@ -919,9 +1024,135 @@ func (a *WebRPC) MK12DealDetail(ctx context.Context, pieceCid string) ([]MK12Dea pipelineMap[pipeline.UUID] = pipeline } - var entries []MK12DealDetailEntry + var mk20Deals []*mk20.DBDeal + err = a.deps.DB.Select(ctx, &mk20Deals, `SELECT + id, + client, + data, + ddo_v1, + retrieval_v1, + pdp_v1 FROM market_mk20_deal WHERE piece_cid_v2 = $1`, pcid.String()) + if err != nil { + return nil, xerrors.Errorf("failed to query mk20 deals: %w", err) + } + + ids := make([]string, len(mk20Deals)) + mk20deals := make([]*MK20StorageDeal, len(mk20Deals)) + + for i, dbdeal := range mk20Deals { + deal, err := dbdeal.ToDeal() + if err != nil { + return nil, err + } + ids[i] = deal.Identifier.String() + + var Err sql.NullString + + if len(dbdeal.DDOv1) > 0 && string(dbdeal.DDOv1) != "null" { + var dddov1 mk20.DBDDOV1 + if err := json.Unmarshal(dbdeal.DDOv1, &dddov1); err != nil { + return nil, fmt.Errorf("unmarshal ddov1: %w", err) + } + if dddov1.Error != "" { + Err.String = dddov1.Error + Err.Valid = true + } + } + + mk20deals[i] = &MK20StorageDeal{ + Deal: deal, + DDOErr: Err, + } + } + + var mk20Pipelines []MK20DDOPipeline + err = a.deps.DB.Select(ctx, &mk20Pipelines, ` + SELECT + created_at, + id, + sp_id, + contract, + client, + piece_cid_v2, + piece_cid, + piece_size, + raw_size, + offline, + url, + indexing, + announce, + allocation_id, + piece_aggregation, + started, + downloaded, + commp_task_id, + after_commp, + deal_aggregation, + aggr_index, + agg_task_id, + aggregated, + sector, + reg_seal_proof, + sector_offset, + sealed, + indexing_created_at, + indexing_task_id, + indexed, + complete + FROM market_mk20_pipeline + WHERE id = ANY($1)`, ids) + if err != nil { + return nil, xerrors.Errorf("failed to query mk20 DDO pipelines: %w", err) + } + + var mk20PDPPipelines []MK20PDPPipeline + err = a.deps.DB.Select(ctx, &mk20PDPPipelines, ` + SELECT + created_at, + id, + client, + piece_cid_v2, + indexing, + announce, + announce_payload, + downloaded, + commp_task_id, + after_commp, + deal_aggregation, + aggr_index, + agg_task_id, + aggregated, + add_piece_task_id, + after_add_piece, + after_add_piece_msg, + save_cache_task_id, + after_save_cache, + indexing_created_at, + indexing_task_id, + indexed, + complete + FROM pdp_pipeline + WHERE id = ANY($1)`, ids) + if err != nil { + return nil, xerrors.Errorf("failed to query mk20 PDP pipelines: %w", err) + } + + mk20pipelineMap := make(map[string]MK20DDOPipeline) + for _, pipeline := range mk20Pipelines { + pipeline := pipeline + mk20pipelineMap[pipeline.ID] = pipeline + } + + mk20PDPpipelineMap := make(map[string]MK20PDPPipeline) + for _, pipeline := range mk20PDPPipelines { + pipeline := pipeline + mk20PDPpipelineMap[pipeline.ID] = pipeline + } + + ret := &PieceDealDetailEntry{} + for _, deal := range mk12Deals { - entry := MK12DealDetailEntry{ + entry := PieceInfoMK12Deals{ Deal: deal, } if pipeline, exists := pipelineMap[deal.UUID]; exists { @@ -929,10 +1160,30 @@ func (a *WebRPC) MK12DealDetail(ctx context.Context, pieceCid string) ([]MK12Dea } else { entry.Pipeline = nil // Pipeline may not exist for processed and active deals } - entries = append(entries, entry) + ret.MK12 = append(ret.MK12, entry) } - return entries, nil + for _, deal := range mk20deals { + entry := PieceInfoMK20Deals{ + Deal: deal, + } + if pipeline, exists := mk20pipelineMap[deal.Deal.Identifier.String()]; exists { + entry.DDOPipeline = &pipeline + } else { + entry.DDOPipeline = nil // Pipeline may not exist for processed and active deals + } + if pipeline, exists := mk20PDPpipelineMap[deal.Deal.Identifier.String()]; exists { + entry.PDPPipeline = &pipeline + } else { + entry.PDPPipeline = nil + } + if ret.MK20 == nil { + ret.MK20 = make([]PieceInfoMK20Deals, 0) + } + ret.MK20 = append(ret.MK20, entry) + } + + return ret, nil } func firstOrZero[T any](a []T) T { @@ -942,7 +1193,104 @@ func firstOrZero[T any](a []T) T { return a[0] } -func (a *WebRPC) MK12DealPipelineRemove(ctx context.Context, uuid string) error { +func (a *WebRPC) DealPipelineRemove(ctx context.Context, id string) error { + _, err := ulid.Parse(id) + if err != nil { + _, err = uuid.Parse(id) + if err != nil { + return xerrors.Errorf("invalid pipeline id: %w", err) + } + return a.mk12DealPipelineRemove(ctx, id) + } + return a.mk20DealPipelineRemove(ctx, id) +} + +func (a *WebRPC) mk20DealPipelineRemove(ctx context.Context, id string) error { + _, err := a.deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var pipelines []struct { + Url string `db:"url"` + Sector sql.NullInt64 `db:"sector"` + + CommpTaskID sql.NullInt64 `db:"commp_task_id"` + AggrTaskID sql.NullInt64 `db:"agg_task_id"` + IndexingTaskID sql.NullInt64 `db:"indexing_task_id"` + } + + err = tx.Select(&pipelines, `SELECT url, sector, commp_task_id, agg_task_id, indexing_task_id + FROM market_mk20_pipeline WHERE id = $1`, id) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, fmt.Errorf("no deal pipeline found with id %s", id) + } + return false, err + } + + if len(pipelines) == 0 { + return false, fmt.Errorf("no deal pipeline found with id %s", id) + } + + // Collect non-null task IDs + var taskIDs []int64 + for _, pipeline := range pipelines { + if pipeline.CommpTaskID.Valid { + taskIDs = append(taskIDs, pipeline.CommpTaskID.Int64) + } + if pipeline.AggrTaskID.Valid { + taskIDs = append(taskIDs, pipeline.AggrTaskID.Int64) + } + if pipeline.IndexingTaskID.Valid { + taskIDs = append(taskIDs, pipeline.IndexingTaskID.Int64) + } + } + + // Check if any tasks are still running + if len(taskIDs) > 0 { + var runningTasks int + err = tx.QueryRow(`SELECT COUNT(*) FROM harmony_task WHERE id = ANY($1)`, taskIDs).Scan(&runningTasks) + if err != nil { + return false, err + } + if runningTasks > 0 { + return false, fmt.Errorf("cannot remove deal pipeline %s: tasks are still running", id) + } + } + + //Mark failure for deal + _, err = tx.Exec(`UPDATE market_mk20_deal SET error = $1 WHERE id = $2`, "Deal pipeline removed by SP", id) + if err != nil { + return false, xerrors.Errorf("failed to mark deal %s as failed", id) + } + + // Remove market_mk20_pipeline entry + _, err = tx.Exec(`DELETE FROM market_mk20_pipeline WHERE id = $1`, id) + if err != nil { + return false, err + } + + // If sector is null, remove related pieceref + for _, pipeline := range pipelines { + if !pipeline.Sector.Valid { + const prefix = "pieceref:" + if strings.HasPrefix(pipeline.Url, prefix) { + refIDStr := pipeline.Url[len(prefix):] + refID, err := strconv.ParseInt(refIDStr, 10, 64) + if err != nil { + return false, fmt.Errorf("invalid refID in URL: %v", err) + } + // Remove from parked_piece_refs where ref_id = refID + _, err = tx.Exec(`DELETE FROM parked_piece_refs WHERE ref_id = $1`, refID) + if err != nil { + return false, err + } + } + } + } + return true, nil + }, harmonydb.OptionRetry()) + return err +} + +func (a *WebRPC) mk12DealPipelineRemove(ctx context.Context, uuid string) error { _, err := a.deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { // First, get deal_pipeline.url, task_ids, and sector values var ( @@ -960,7 +1308,7 @@ func (a *WebRPC) MK12DealPipelineRemove(ctx context.Context, uuid string) error &url, §or, &commpTaskID, &psdTaskID, &findDealTaskID, &indexingTaskID, ) if err != nil { - if err == sql.ErrNoRows { + if errors.Is(err, pgx.ErrNoRows) { return false, fmt.Errorf("no deal pipeline found with uuid %s", uuid) } return false, err @@ -1037,7 +1385,7 @@ func (a *WebRPC) MK12DealPipelineRemove(ctx context.Context, uuid string) error return err } -type PipelineFailedStats struct { +type MK12PipelineFailedStats struct { DownloadingFailed int64 CommPFailed int64 PSDFailed int64 @@ -1045,7 +1393,7 @@ type PipelineFailedStats struct { IndexFailed int64 } -func (a *WebRPC) PipelineFailedTasksMarket(ctx context.Context) (*PipelineFailedStats, error) { +func (a *WebRPC) MK12PipelineFailedTasks(ctx context.Context) (*MK12PipelineFailedStats, error) { // We'll create a similar query, but this time we coalesce the task IDs from harmony_task. // If the join fails (no matching harmony_task), all joined fields for that task will be NULL. // We detect failure by checking that xxx_task_id IS NOT NULL, after_xxx = false, and that no task record was found in harmony_task. @@ -1064,7 +1412,7 @@ WITH pipeline_data AS ( dp.after_find_deal, pp.task_id AS downloading_task_id FROM market_mk12_deal_pipeline dp - LEFT JOIN parked_pieces pp ON pp.piece_cid = dp.piece_cid + LEFT JOIN parked_pieces pp ON pp.piece_cid = dp.piece_cid AND pp.piece_padded_size = dp.piece_size WHERE dp.complete = false ), tasks AS ( @@ -1143,7 +1491,7 @@ FROM tasks counts := c[0] - return &PipelineFailedStats{ + return &MK12PipelineFailedStats{ DownloadingFailed: counts.DownloadingFailed, CommPFailed: counts.CommPFailed, PSDFailed: counts.PSDFailed, @@ -1152,7 +1500,7 @@ FROM tasks }, nil } -func (a *WebRPC) BulkRestartFailedMarketTasks(ctx context.Context, taskType string) error { +func (a *WebRPC) MK12BulkRestartFailedMarketTasks(ctx context.Context, taskType string) error { didCommit, err := a.deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { var rows *harmonydb.Query var err error @@ -1278,7 +1626,7 @@ func (a *WebRPC) BulkRestartFailedMarketTasks(ctx context.Context, taskType stri return nil } -func (a *WebRPC) BulkRemoveFailedMarketPipelines(ctx context.Context, taskType string) error { +func (a *WebRPC) MK12BulkRemoveFailedMarketPipelines(ctx context.Context, taskType string) error { didCommit, err := a.deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { var rows *harmonydb.Query var err error @@ -1459,6 +1807,7 @@ func (a *WebRPC) MK12DDOStorageDealList(ctx context.Context, limit int, offset i md.created_at, md.piece_cid, md.piece_size, + md.raw_size, md.error, coalesce(mm12dp.complete, true) as processed FROM market_direct_deals md @@ -1475,6 +1824,18 @@ func (a *WebRPC) MK12DDOStorageDealList(ctx context.Context, limit int, offset i return nil, err } mk12Summaries[i].Miner = addr.String() + + if mk12Summaries[i].RawSize.Valid { + pcid, err := cid.Parse(mk12Summaries[i].PieceCidV1) + if err != nil { + return nil, xerrors.Errorf("failed to parse v1 piece CID: %w", err) + } + pcid2, err := commcidv2.PieceCidV2FromV1(pcid, uint64(mk12Summaries[i].RawSize.Int64)) + if err != nil { + return nil, xerrors.Errorf("failed to convert v1 piece CID to v2: %w", err) + } + mk12Summaries[i].PieceCidV2 = pcid2.String() + } } return mk12Summaries, nil diff --git a/web/api/webrpc/market_2.go b/web/api/webrpc/market_2.go new file mode 100644 index 000000000..aeefb9b47 --- /dev/null +++ b/web/api/webrpc/market_2.go @@ -0,0 +1,1528 @@ +package webrpc + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + "time" + + eabi "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/oklog/ulid" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/market/mk20" +) + +type MK20StorageDeal struct { + Deal *mk20.Deal `json:"deal"` + DDOErr sql.NullString `json:"ddoerr"` + PDPErr sql.NullString `json:"pdperr"` + DDOId sql.NullInt64 `json:"ddoid"` +} + +func (a *WebRPC) MK20DDOStorageDeal(ctx context.Context, id string) (*MK20StorageDeal, error) { + pid, err := ulid.Parse(id) + if err != nil { + return nil, xerrors.Errorf("parsing deal ID: %w", err) + } + + var dbDeals []mk20.DBDeal + err = a.deps.DB.Select(ctx, &dbDeals, `SELECT id, + client, + data, + ddo_v1, + retrieval_v1, + pdp_v1 FROM market_mk20_deal WHERE id = $1`, pid.String()) + if err != nil { + return nil, xerrors.Errorf("getting deal from DB: %w", err) + } + if len(dbDeals) != 1 { + return nil, xerrors.Errorf("expected 1 deal, got %d", len(dbDeals)) + } + dbDeal := dbDeals[0] + deal, err := dbDeal.ToDeal() + if err != nil { + return nil, xerrors.Errorf("converting DB deal to struct: %w", err) + } + + ret := &MK20StorageDeal{Deal: deal} + + if len(dbDeal.DDOv1) > 0 && string(dbDeal.DDOv1) != "null" { + var dddov1 mk20.DBDDOV1 + if err := json.Unmarshal(dbDeal.DDOv1, &dddov1); err != nil { + return nil, fmt.Errorf("unmarshal ddov1: %w", err) + } + if dddov1.Error != "" { + ret.DDOErr = sql.NullString{String: dddov1.Error, Valid: true} + } + if dddov1.DealID > 0 { + ret.DDOId = sql.NullInt64{Int64: dddov1.DealID, Valid: true} + } + } + + if len(dbDeal.PDPV1) > 0 && string(dbDeal.PDPV1) != "null" { + var pdpv1 mk20.DBPDPV1 + if err := json.Unmarshal(dbDeal.PDPV1, &pdpv1); err != nil { + return nil, fmt.Errorf("unmarshal pdpv1: %w", err) + } + if pdpv1.Error != "" { + ret.PDPErr = sql.NullString{String: pdpv1.Error, Valid: true} + } + } + + return ret, nil +} + +type MK20StorageDealList struct { + ID string `db:"id" json:"id"` + CreatedAt time.Time `db:"created_at" json:"created_at"` + PieceCidV2 sql.NullString `db:"piece_cid_v2" json:"piece_cid_v2"` + Processed bool `db:"processed" json:"processed"` + Error sql.NullString `db:"error" json:"error"` + Miner sql.NullString `db:"miner" json:"miner"` +} + +func (a *WebRPC) MK20DDOStorageDeals(ctx context.Context, limit int, offset int) ([]*MK20StorageDealList, error) { + var mk20Summaries []*MK20StorageDealList + + err := a.deps.DB.Select(ctx, &mk20Summaries, `SELECT + d.created_at, + d.id, + d.piece_cid_v2, + (d.ddo_v1->'ddo'->>'provider')::text AS miner, + (d.ddo_v1->>'error')::text AS error, + CASE + WHEN EXISTS ( + SELECT 1 FROM market_mk20_pipeline_waiting w + WHERE w.id = d.id + ) THEN FALSE + WHEN EXISTS ( + SELECT 1 FROM market_mk20_pipeline p + WHERE p.id = d.id AND p.complete = FALSE + ) THEN FALSE + ELSE TRUE + END AS processed + FROM market_mk20_deal d + WHERE d.ddo_v1 IS NOT NULL AND d.ddo_v1 != 'null' + ORDER BY d.created_at DESC + LIMIT $1 OFFSET $2;`, limit, offset) + if err != nil { + return nil, fmt.Errorf("failed to fetch deal list: %w", err) + } + + return mk20Summaries, nil +} + +func (a *WebRPC) MK20DDOPipelines(ctx context.Context, limit int, offset int) ([]*MK20DDOPipeline, error) { + if limit <= 0 { + limit = 25 + } + if limit > 100 { + limit = 100 + } + if offset < 0 { + offset = 0 + } + + var pipelines []*MK20DDOPipeline + err := a.deps.DB.Select(ctx, &pipelines, ` + SELECT + created_at, + id, + sp_id, + contract, + client, + piece_cid_v2, + piece_cid, + piece_size, + raw_size, + offline, + url, + indexing, + announce, + allocation_id, + piece_aggregation, + started, + downloaded, + commp_task_id, + after_commp, + deal_aggregation, + aggr_index, + agg_task_id, + aggregated, + sector, + reg_seal_proof, + sector_offset, + sealed, + indexing_created_at, + indexing_task_id, + indexed, + complete + FROM market_mk20_pipeline + ORDER BY created_at DESC + LIMIT $1 OFFSET $2`, limit, offset) + if err != nil { + return nil, fmt.Errorf("failed to fetch deal pipelines: %w", err) + } + + for _, s := range pipelines { + addr, err := address.NewIDAddress(uint64(s.SpId)) + if err != nil { + return nil, xerrors.Errorf("failed to parse the miner ID: %w", err) + } + s.Miner = addr.String() + } + + return pipelines, nil +} + +type MK20PipelineFailedStats struct { + DownloadingFailed int64 + CommPFailed int64 + AggFailed int64 + IndexFailed int64 +} + +func (a *WebRPC) MK20PipelineFailedTasks(ctx context.Context) (*MK20PipelineFailedStats, error) { + // We'll create a similar query, but this time we coalesce the task IDs from harmony_task. + // If the join fails (no matching harmony_task), all joined fields for that task will be NULL. + // We detect failure by checking that xxx_task_id IS NOT NULL, after_xxx = false, and that no task record was found in harmony_task. + + const query = ` + WITH pipeline_data AS ( + SELECT dp.id, + dp.complete, + dp.commp_task_id, + dp.agg_task_id, + dp.indexing_task_id, + dp.sector, + dp.after_commp, + dp.aggregated, + pp.task_id AS downloading_task_id + FROM market_mk20_pipeline dp + LEFT JOIN parked_pieces pp ON pp.piece_cid = dp.piece_cid AND pp.piece_padded_size = dp.piece_size + WHERE dp.complete = false + ), + tasks AS ( + SELECT p.*, + dt.id AS downloading_tid, + ct.id AS commp_tid, + pt.id AS agg_tid, + it.id AS index_tid + FROM pipeline_data p + LEFT JOIN harmony_task dt ON dt.id = p.downloading_task_id + LEFT JOIN harmony_task ct ON ct.id = p.commp_task_id + LEFT JOIN harmony_task pt ON pt.id = p.agg_task_id + LEFT JOIN harmony_task it ON it.id = p.indexing_task_id + ) + SELECT + -- Downloading failed: + -- downloading_task_id IS NOT NULL, after_commp = false (haven't completed commp stage), + -- and downloading_tid IS NULL (no harmony_task record) + COUNT(*) FILTER ( + WHERE downloading_task_id IS NOT NULL + AND after_commp = false + AND downloading_tid IS NULL + ) AS downloading_failed, + + -- CommP (verify) failed: + -- commp_task_id IS NOT NULL, after_commp = false, commp_tid IS NULL + COUNT(*) FILTER ( + WHERE commp_task_id IS NOT NULL + AND after_commp = false + AND commp_tid IS NULL + ) AS commp_failed, + + -- Aggregation failed: + -- agg_task_id IS NOT NULL, aggregated = false, agg_tid IS NULL + COUNT(*) FILTER ( + WHERE agg_task_id IS NOT NULL + AND aggregated = false + AND agg_tid IS NULL + ) AS agg_failed, + + -- Index failed: + -- indexing_task_id IS NOT NULL and if we assume indexing is after find_deal: + -- If indexing_task_id is set, we are presumably at indexing stage. + -- If index_tid IS NULL (no task found), then it's failed. + -- We don't have after_index, now at indexing. + COUNT(*) FILTER ( + WHERE indexing_task_id IS NOT NULL + AND index_tid IS NULL + AND aggregated = true + ) AS index_failed + FROM tasks + ` + + var c []struct { + DownloadingFailed int64 `db:"downloading_failed"` + CommPFailed int64 `db:"commp_failed"` + AggFailed int64 `db:"agg_failed"` + IndexFailed int64 `db:"index_failed"` + } + + err := a.deps.DB.Select(ctx, &c, query) + if err != nil { + return nil, xerrors.Errorf("failed to run failed task query: %w", err) + } + + counts := c[0] + + return &MK20PipelineFailedStats{ + DownloadingFailed: counts.DownloadingFailed, + CommPFailed: counts.CommPFailed, + AggFailed: counts.AggFailed, + IndexFailed: counts.IndexFailed, + }, nil +} + +func (a *WebRPC) MK20BulkRestartFailedMarketTasks(ctx context.Context, taskType string) error { + didCommit, err := a.deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { + var rows *harmonydb.Query + var err error + + switch taskType { + case "downloading": + rows, err = tx.Query(` + SELECT pp.task_id + FROM market_mk20_pipeline dp + LEFT JOIN parked_pieces pp ON pp.piece_cid = dp.piece_cid AND pp.piece_padded_size = dp.piece_size + LEFT JOIN harmony_task h ON h.id = pp.task_id + WHERE dp.downloaded = false + AND h.id IS NULL + `) + case "commp": + rows, err = tx.Query(` + SELECT dp.commp_task_id + FROM market_mk20_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.commp_task_id + WHERE dp.complete = false + AND dp.downloaded = true + AND dp.commp_task_id IS NOT NULL + AND dp.after_commp = false + AND h.id IS NULL + `) + case "aggregate": + rows, err = tx.Query(` + SELECT dp.agg_task_id + FROM market_mk20_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.agg_task_id + WHERE dp.complete = false + AND dp.after_commp = true + AND dp.agg_task_id IS NOT NULL + AND dp.aggregated = false + AND h.id IS NULL + `) + case "index": + rows, err = tx.Query(` + SELECT dp.indexing_task_id + FROM market_mk20_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.indexing_task_id + WHERE dp.complete = false + AND dp.indexing_task_id IS NOT NULL + AND dp.sealed = true + AND h.id IS NULL + `) + default: + return false, fmt.Errorf("unknown task type: %s", taskType) + } + + if err != nil { + return false, fmt.Errorf("failed to query failed tasks: %w", err) + } + defer rows.Close() + + var taskIDs []int64 + for rows.Next() { + var tid int64 + if err := rows.Scan(&tid); err != nil { + return false, fmt.Errorf("failed to scan task_id: %w", err) + } + taskIDs = append(taskIDs, tid) + } + + if err := rows.Err(); err != nil { + return false, fmt.Errorf("row iteration error: %w", err) + } + + for _, taskID := range taskIDs { + var name string + var posted time.Time + var result bool + err = tx.QueryRow(` + SELECT name, posted, result + FROM harmony_task_history + WHERE task_id = $1 + ORDER BY id DESC LIMIT 1 + `, taskID).Scan(&name, &posted, &result) + if errors.Is(err, pgx.ErrNoRows) { + // No history means can't restart this task + continue + } else if err != nil { + return false, fmt.Errorf("failed to query history: %w", err) + } + + // If result=true means the task ended successfully, no restart needed + if result { + continue + } + + log.Infow("restarting task", "task_id", taskID, "name", name) + + _, err = tx.Exec(` + INSERT INTO harmony_task (id, initiated_by, update_time, posted_time, owner_id, added_by, previous_task, name) + VALUES ($1, NULL, NOW(), $2, NULL, $3, NULL, $4) + `, taskID, posted, a.deps.MachineID, name) + if err != nil { + return false, fmt.Errorf("failed to insert harmony_task for task_id %d: %w", taskID, err) + } + } + + // All done successfully, commit the transaction + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return err + } + if !didCommit { + return fmt.Errorf("transaction did not commit") + } + + return nil +} + +func (a *WebRPC) MK20BulkRemoveFailedMarketPipelines(ctx context.Context, taskType string) error { + didCommit, err := a.deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { + var rows *harmonydb.Query + var err error + + // We'll select pipeline fields directly based on the stage conditions + switch taskType { + case "downloading": + rows, err = tx.Query(` + SELECT dp.id, dp.url, dp.sector, + dp.commp_task_id, dp.agg_task_id, dp.indexing_task_id + FROM market_mk20_pipeline dp + LEFT JOIN parked_pieces pp ON pp.piece_cid = dp.piece_cid AND pp.piece_padded_size = dp.piece_size + LEFT JOIN harmony_task h ON h.id = pp.task_id + WHERE dp.complete = false + AND dp.downloaded = false + AND pp.task_id IS NOT NULL + AND h.id IS NULL + `) + case "commp": + rows, err = tx.Query(` + SELECT dp.id, dp.url, dp.sector, + dp.commp_task_id, dp.agg_task_id, dp.indexing_task_id + FROM market_mk20_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.commp_task_id + WHERE dp.complete = false + AND dp.downloaded = true + AND dp.commp_task_id IS NOT NULL + AND dp.after_commp = false + AND h.id IS NULL + `) + case "aggregate": + rows, err = tx.Query(` + SELECT dp.id, dp.url, dp.sector, + dp.commp_task_id, dp.agg_task_id, dp.indexing_task_id + FROM market_mk20_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.agg_task_id + WHERE dp.complete = false + AND after_commp = true + AND dp.agg_task_id IS NOT NULL + AND dp.aggregated = false + AND h.id IS NULL + `) + case "index": + rows, err = tx.Query(` + SELECT dp.id, dp.url, dp.sector, + dp.commp_task_id, dp.agg_task_id, dp.indexing_task_id + FROM market_mk20_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.indexing_task_id + WHERE dp.complete = false + AND sealed = true + AND dp.indexing_task_id IS NOT NULL + AND h.id IS NULL + `) + default: + return false, fmt.Errorf("unknown task type: %s", taskType) + } + + if err != nil { + return false, fmt.Errorf("failed to query failed pipelines: %w", err) + } + defer rows.Close() + + type pipelineInfo struct { + id string + url string + sector sql.NullInt64 + commpTaskID sql.NullInt64 + aggTaskID sql.NullInt64 + indexingTaskID sql.NullInt64 + } + + var pipelines []pipelineInfo + for rows.Next() { + var p pipelineInfo + if err := rows.Scan(&p.id, &p.url, &p.sector, &p.commpTaskID, &p.aggTaskID, &p.indexingTaskID); err != nil { + return false, fmt.Errorf("failed to scan pipeline info: %w", err) + } + pipelines = append(pipelines, p) + } + if err := rows.Err(); err != nil { + return false, fmt.Errorf("row iteration error: %w", err) + } + + for _, p := range pipelines { + // Gather task IDs + var taskIDs []int64 + if p.commpTaskID.Valid { + taskIDs = append(taskIDs, p.commpTaskID.Int64) + } + if p.aggTaskID.Valid { + taskIDs = append(taskIDs, p.aggTaskID.Int64) + } + if p.indexingTaskID.Valid { + taskIDs = append(taskIDs, p.indexingTaskID.Int64) + } + + if len(taskIDs) > 0 { + var runningTasks int + err = tx.QueryRow(`SELECT COUNT(*) FROM harmony_task WHERE id = ANY($1)`, taskIDs).Scan(&runningTasks) + if err != nil { + return false, err + } + if runningTasks > 0 { + // This should not happen if they are failed, but just in case + return false, fmt.Errorf("cannot remove deal pipeline %s: tasks are still running", p.id) + } + } + + _, err = tx.Exec(`UPDATE market_mk20_deal SET error = $1 WHERE id = $2`, "Deal pipeline removed by SP", p.id) + if err != nil { + return false, xerrors.Errorf("store deal failure: updating deal pipeline: %w", err) + } + + _, err = tx.Exec(`DELETE FROM market_mk20_pipeline WHERE id = $1`, p.id) + if err != nil { + return false, err + } + + // If sector is null, remove related pieceref + if !p.sector.Valid { + const prefix = "pieceref:" + if strings.HasPrefix(p.url, prefix) { + refIDStr := p.url[len(prefix):] + refID, err := strconv.ParseInt(refIDStr, 10, 64) + if err != nil { + return false, fmt.Errorf("invalid refID in URL for pipeline %s: %v", p.id, err) + } + _, err = tx.Exec(`DELETE FROM parked_piece_refs WHERE ref_id = $1`, refID) + if err != nil { + return false, fmt.Errorf("failed to remove parked_piece_refs for pipeline %s: %w", p.id, err) + } + } + } + + log.Infow("removed failed pipeline", "id", p.id) + } + + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return err + } + if !didCommit { + return fmt.Errorf("transaction did not commit") + } + + return nil +} + +func (a *WebRPC) AddMarketContract(ctx context.Context, contract, abiString string) error { + if contract == "" { + return fmt.Errorf("empty contract") + } + if abiString == "" { + return fmt.Errorf("empty abi") + } + + if !strings.HasPrefix(contract, "0x") { + return fmt.Errorf("contract must start with 0x") + } + + if !common.IsHexAddress(contract) { + return fmt.Errorf("invalid contract address") + } + + ethabi, err := eabi.JSON(strings.NewReader(abiString)) + if err != nil { + return fmt.Errorf("invalid abi: %w", err) + } + + if len(ethabi.Methods) == 0 { + return fmt.Errorf("invalid abi: no methods") + } + + n, err := a.deps.DB.Exec(ctx, `INSERT INTO ddo_contracts (address, abi) VALUES ($1, $2) ON CONFLICT (address) DO NOTHING`, contract, abiString) + if err != nil { + return xerrors.Errorf("failed to add contract: %w", err) + } + if n == 0 { + return fmt.Errorf("contract already exists") + } + return nil +} + +func (a *WebRPC) UpdateMarketContract(ctx context.Context, contract, abiString string) error { + if contract == "" { + return fmt.Errorf("empty contract") + } + + if abiString == "" { + return fmt.Errorf("empty abi") + } + + if !strings.HasPrefix(contract, "0x") { + return fmt.Errorf("contract must start with 0x") + } + + if !common.IsHexAddress(contract) { + return fmt.Errorf("invalid contract address") + } + + ethabi, err := eabi.JSON(strings.NewReader(abiString)) + if err != nil { + return fmt.Errorf("invalid abi: %w", err) + } + + if len(ethabi.Methods) == 0 { + return fmt.Errorf("invalid abi: no methods") + } + + // Check if contract exists in DB + var count int + err = a.deps.DB.QueryRow(ctx, `SELECT COUNT(*) FROM ddo_contracts WHERE address = $1`, contract).Scan(&count) + if err != nil { + return xerrors.Errorf("failed to check contract: %w", err) + } + if count == 0 { + return fmt.Errorf("contract does not exist") + } + + n, err := a.deps.DB.Exec(ctx, `UPDATE ddo_contracts SET abi = $2 WHERE address = $1`, contract, abiString) + if err != nil { + return xerrors.Errorf("failed to update contract ABI: %w", err) + } + + if n == 0 { + return fmt.Errorf("failed to update the contract ABI") + } + + return nil +} + +func (a *WebRPC) RemoveMarketContract(ctx context.Context, contract string) error { + if contract == "" { + return fmt.Errorf("empty contract") + } + if !strings.HasPrefix(contract, "0x") { + return fmt.Errorf("contract must start with 0x") + } + _, err := a.deps.DB.Exec(ctx, `DELETE FROM ddo_contracts WHERE address = $1`, contract) + if err != nil { + return xerrors.Errorf("failed to remove contract: %w", err) + } + return nil +} + +func (a *WebRPC) ListMarketContracts(ctx context.Context) (map[string]string, error) { + var contracts []struct { + Address string `db:"address"` + Abi string `db:"abi"` + } + err := a.deps.DB.Select(ctx, &contracts, `SELECT address, abi FROM ddo_contracts`) + if err != nil { + return nil, xerrors.Errorf("failed to get contracts from DB: %w", err) + } + + contractMap := make(map[string]string) + for _, contract := range contracts { + contractMap[contract.Address] = contract.Abi + } + + return contractMap, nil +} + +func (a *WebRPC) EnableProduct(ctx context.Context, name string) error { + if name == "" { + return fmt.Errorf("empty product name") + } + + // Check if product exists in market_mk20_products + var count int + err := a.deps.DB.QueryRow(ctx, `SELECT COUNT(*) FROM market_mk20_products WHERE name = $1`, name).Scan(&count) + if err != nil { + return xerrors.Errorf("failed to check product: %w", err) + } + if count == 0 { + return fmt.Errorf("product does not exist") + } + n, err := a.deps.DB.Exec(ctx, `UPDATE market_mk20_products SET enabled = true WHERE name = $1`, name) + if err != nil { + return xerrors.Errorf("failed to enable product: %w", err) + } + if n == 0 { + return fmt.Errorf("failed to enable the product") + } + return nil +} + +func (a *WebRPC) DisableProduct(ctx context.Context, name string) error { + if name == "" { + return fmt.Errorf("empty product name") + } + + // Check if product exists in market_mk20_products + var count int + err := a.deps.DB.QueryRow(ctx, `SELECT COUNT(*) FROM market_mk20_products WHERE name = $1`, name).Scan(&count) + if err != nil { + return xerrors.Errorf("failed to check product: %w", err) + } + if count == 0 { + return fmt.Errorf("product does not exist") + } + n, err := a.deps.DB.Exec(ctx, `UPDATE market_mk20_products SET enabled = false WHERE name = $1`, name) + if err != nil { + return xerrors.Errorf("failed to disable product: %w", err) + } + if n == 0 { + return fmt.Errorf("failed to disable the product") + } + return nil +} + +func (a *WebRPC) ListProducts(ctx context.Context) (map[string]bool, error) { + var products []struct { + Name string `db:"name"` + Enabled bool `db:"enabled"` + } + err := a.deps.DB.Select(ctx, &products, `SELECT name, enabled FROM market_mk20_products`) + if err != nil { + return nil, xerrors.Errorf("failed to get products from DB: %w", err) + } + productMap := make(map[string]bool) + for _, product := range products { + productMap[product.Name] = product.Enabled + } + return productMap, nil +} + +func (a *WebRPC) EnableDataSource(ctx context.Context, name string) error { + if name == "" { + return fmt.Errorf("empty data source name") + } + + // check if datasource exists in market_mk20_data_source + var count int + err := a.deps.DB.QueryRow(ctx, `SELECT COUNT(*) FROM market_mk20_data_source WHERE name = $1`, name).Scan(&count) + if err != nil { + return xerrors.Errorf("failed to check datasource: %w", err) + } + if count == 0 { + return fmt.Errorf("datasource does not exist") + } + n, err := a.deps.DB.Exec(ctx, `UPDATE market_mk20_data_source SET enabled = true WHERE name = $1`, name) + if err != nil { + return xerrors.Errorf("failed to enable datasource: %w", err) + } + if n == 0 { + return fmt.Errorf("failed to enable the datasource") + } + return nil +} + +func (a *WebRPC) DisableDataSource(ctx context.Context, name string) error { + if name == "" { + return fmt.Errorf("empty data source name") + } + // check if datasource exists in market_mk20_data_source + var count int + err := a.deps.DB.QueryRow(ctx, `SELECT COUNT(*) FROM market_mk20_data_source WHERE name = $1`, name).Scan(&count) + if err != nil { + return xerrors.Errorf("failed to check datasource: %w", err) + } + if count == 0 { + return fmt.Errorf("datasource does not exist") + } + n, err := a.deps.DB.Exec(ctx, `UPDATE market_mk20_data_source SET enabled = false WHERE name = $1`, name) + if err != nil { + return xerrors.Errorf("failed to disable datasource: %w", err) + } + if n == 0 { + return fmt.Errorf("failed to disable the datasource") + } + return nil +} + +func (a *WebRPC) ListDataSources(ctx context.Context) (map[string]bool, error) { + var datasources []struct { + Name string `db:"name"` + Enabled bool `db:"enabled"` + } + err := a.deps.DB.Select(ctx, &datasources, `SELECT name, enabled FROM market_mk20_data_source`) + if err != nil { + return nil, xerrors.Errorf("failed to get datasources from DB: %w", err) + } + + datasourceMap := make(map[string]bool) + for _, datasource := range datasources { + datasourceMap[datasource.Name] = datasource.Enabled + } + return datasourceMap, nil +} + +type UploadStatus struct { + ID string `json:"id"` + Status mk20.UploadStatus `json:"status"` +} + +func (a *WebRPC) ChunkUploadStatus(ctx context.Context, idStr string) (*UploadStatus, error) { + id, err := ulid.Parse(idStr) + if err != nil { + return nil, fmt.Errorf("invalid chunk upload id: %w", err) + } + + var status mk20.UploadStatus + + err = a.deps.DB.QueryRow(ctx, `SELECT + COUNT(*) AS total, + COUNT(*) FILTER (WHERE complete) AS complete, + COUNT(*) FILTER (WHERE NOT complete) AS missing, + ARRAY_AGG(chunk ORDER BY chunk) FILTER (WHERE complete) AS completed_chunks, + ARRAY_AGG(chunk ORDER BY chunk) FILTER (WHERE NOT complete) AS incomplete_chunks + FROM + market_mk20_deal_chunk + WHERE + id = $1 + GROUP BY + id;`, id.String()).Scan(&status.TotalChunks, &status.Uploaded, &status.Missing, &status.UploadedChunks, &status.MissingChunks) + if err != nil { + if !errors.Is(err, pgx.ErrNoRows) { + return nil, xerrors.Errorf("failed to get chunk upload status: %w", err) + } + return nil, nil + } + + return &UploadStatus{ + ID: idStr, + Status: status, + }, nil +} + +// MK20PDPPipeline represents a record from market_mk20_PDP_pipeline table +type MK20PDPPipeline struct { + ID string `db:"id" json:"id"` + Client string `db:"client" json:"client"` + PieceCidV2 string `db:"piece_cid_v2" json:"piece_cid_v2"` + Indexing bool `db:"indexing" json:"indexing"` + Announce bool `db:"announce" json:"announce"` + AnnouncePayload bool `db:"announce_payload" json:"announce_payload"` + + Downloaded bool `db:"downloaded" json:"downloaded"` + + CommpTaskId sql.NullInt64 `db:"commp_task_id" json:"commp_task_id"` + AfterCommp bool `db:"after_commp" json:"after_commp"` + + DealAggregation int `db:"deal_aggregation" json:"deal_aggregation"` + AggregationIndex int64 `db:"aggr_index" json:"aggr_index"` + AggregationTaskID sql.NullInt64 `db:"agg_task_id" json:"agg_task_id"` + Aggregated bool `db:"aggregated" json:"aggregated"` + + AddPieceTaskID sql.NullInt64 `db:"add_piece_task_id" json:"add_piece_task_id"` + AfterAddPiece bool `db:"after_add_piece" json:"after_add_piece"` + + AfterAddPieceMsg bool `db:"after_add_piece_msg" json:"after_add_piece_msg"` + + SaveCacheTaskID sql.NullInt64 `db:"save_cache_task_id" json:"save_cache_task_id"` + AfterSaveCache bool `db:"after_save_cache" json:"after_save_cache"` + + IndexingCreatedAt sql.NullTime `db:"indexing_created_at" json:"indexing_created_at"` + IndexingTaskId sql.NullInt64 `db:"indexing_task_id" json:"indexing_task_id"` + Indexed bool `db:"indexed" json:"indexed"` + + Complete bool `db:"complete" json:"complete"` + CreatedAt time.Time `db:"created_at" json:"created_at"` + + Miner string `db:"-" json:"miner"` +} + +type MK20PDPDealList struct { + ID string `db:"id" json:"id"` + CreatedAt time.Time `db:"created_at" json:"created_at"` + PieceCidV2 sql.NullString `db:"piece_cid_v2" json:"piece_cid_v2"` + Processed bool `db:"processed" json:"processed"` + Error sql.NullString `db:"error" json:"error"` +} + +func (a *WebRPC) MK20PDPStorageDeals(ctx context.Context, limit int, offset int) ([]*MK20PDPDealList, error) { + var pdpSummaries []*MK20PDPDealList + + err := a.deps.DB.Select(ctx, &pdpSummaries, `SELECT + d.created_at, + d.id, + d.piece_cid_v2, + (d.pdp_v1->>'error')::text AS error, + (d.pdp_v1->>'complete')::boolean as processed + FROM market_mk20_deal d + WHERE d.pdp_v1 IS NOT NULL AND d.pdp_v1 != 'null' + ORDER BY d.created_at DESC + LIMIT $1 OFFSET $2;`, limit, offset) + if err != nil { + return nil, fmt.Errorf("failed to fetch PDP deal list: %w", err) + } + + return pdpSummaries, nil +} + +func (a *WebRPC) MK20PDPPipelines(ctx context.Context, limit int, offset int) ([]*MK20PDPPipeline, error) { + if limit <= 0 { + limit = 25 + } + if limit > 100 { + limit = 100 + } + if offset < 0 { + offset = 0 + } + + var pipelines []*MK20PDPPipeline + err := a.deps.DB.Select(ctx, &pipelines, ` + SELECT + created_at, + id, + client, + piece_cid_v2, + indexing, + announce, + announce_payload, + downloaded, + commp_task_id, + after_commp, + deal_aggregation, + aggr_index, + agg_task_id, + aggregated, + add_piece_task_id, + after_add_piece, + after_add_piece_msg, + save_cache_task_id, + after_save_cache, + indexing_created_at, + indexing_task_id, + indexed, + complete + FROM pdp_pipeline + ORDER BY created_at DESC + LIMIT $1 OFFSET $2`, limit, offset) + if err != nil { + return nil, fmt.Errorf("failed to fetch pdp pipelines: %w", err) + } + + return pipelines, nil +} + +type MK20PDPPipelineFailedStats struct { + DownloadingFailed int64 + CommPFailed int64 + AggFailed int64 + AddPieceFailed int64 + SaveCacheFailed int64 + IndexFailed int64 +} + +func (a *WebRPC) MK20PDPPipelineFailedTasks(ctx context.Context) (*MK20PDPPipelineFailedStats, error) { + // We'll create a similar query, but this time we coalesce the task IDs from harmony_task. + // If the join fails (no matching harmony_task), all joined fields for that task will be NULL. + // We detect failure by checking that xxx_task_id IS NOT NULL, after_xxx = false, and that no task record was found in harmony_task. + + const query = ` + WITH pipeline_data AS ( + SELECT + dp.id, + dp.complete, + dp.commp_task_id, + dp.agg_task_id, + dp.add_piece_task_id, + dp.save_cache_task_id, + dp.indexing_task_id, + dp.after_commp, + dp.aggregated, + dp.after_add_piece, + dp.after_save_cache, + t.downloading_task_id + FROM pdp_pipeline dp + LEFT JOIN market_mk20_download_pipeline mdp + ON mdp.id = dp.id + AND mdp.piece_cid_v2 = dp.piece_cid_v2 + AND mdp.product = $1 + LEFT JOIN LATERAL ( + SELECT pp.task_id AS downloading_task_id + FROM unnest(mdp.ref_ids) AS r(ref_id) + JOIN parked_piece_refs pr ON pr.ref_id = r.ref_id + JOIN parked_pieces pp ON pp.id = pr.piece_id + WHERE pp.complete = FALSE + LIMIT 1 + ) t ON TRUE + WHERE dp.complete = FALSE + ), + tasks AS ( + SELECT p.*, + dt.id AS downloading_tid, + ct.id AS commp_tid, + at.id AS agg_tid, + ap.id as add_piece_tid, + sc.id as save_cache_tid, + it.id AS index_tid + FROM pipeline_data p + LEFT JOIN harmony_task dt ON dt.id = p.downloading_task_id + LEFT JOIN harmony_task ct ON ct.id = p.commp_task_id + LEFT JOIN harmony_task at ON at.id = p.agg_task_id + LEFT JOIN harmony_task ap ON ap.id = p.add_piece_task_id + LEFT JOIN harmony_task sc ON sc.id = p.save_cache_task_id + LEFT JOIN harmony_task it ON it.id = p.indexing_task_id + ) + SELECT + -- Downloading failed: + -- downloading_task_id IS NOT NULL, after_commp = false (haven't completed commp stage), + -- and downloading_tid IS NULL (no harmony_task record) + COUNT(*) FILTER ( + WHERE downloading_task_id IS NOT NULL + AND after_commp = false + AND downloading_tid IS NULL + ) AS downloading_failed, + + -- CommP (verify) failed: + -- commp_task_id IS NOT NULL, after_commp = false, commp_tid IS NULL + COUNT(*) FILTER ( + WHERE commp_task_id IS NOT NULL + AND after_commp = false + AND commp_tid IS NULL + ) AS commp_failed, + + -- Aggregation failed: + -- agg_task_id IS NOT NULL, aggregated = false, agg_tid IS NULL + COUNT(*) FILTER ( + WHERE agg_task_id IS NOT NULL + AND aggregated = false + AND agg_tid IS NULL + ) AS agg_failed, + + -- Add Piece failed: + -- add_piece_task_id IS NOT NULL, after_add_piece = false, add_piece_tid IS NULL + COUNT(*) FILTER ( + WHERE add_piece_task_id IS NOT NULL + AND after_add_piece = false + AND add_piece_tid IS NULL + ) AS add_piece_failed, + + -- Save Cache failed: + -- save_cache_task_id IS NOT NULL, after_save_cache = false, save_cache_tid IS NULL + COUNT(*) FILTER ( + WHERE save_cache_task_id IS NOT NULL + AND after_save_cache = false + AND save_cache_tid IS NULL + ) AS save_cache_failed, + + -- Index failed: + -- indexing_task_id IS NOT NULL and if we assume indexing is after find_deal: + -- If indexing_task_id is set, we are presumably at indexing stage. + -- If index_tid IS NULL (no task found), then it's failed. + -- We don't have after_index, now at indexing. + COUNT(*) FILTER ( + WHERE indexing_task_id IS NOT NULL + AND index_tid IS NULL + AND after_save_cache = true + ) AS index_failed + FROM tasks + ` + + var c []struct { + DownloadingFailed int64 `db:"downloading_failed"` + CommPFailed int64 `db:"commp_failed"` + AggFailed int64 `db:"agg_failed"` + AddPieceFailed int64 `db:"add_piece_failed"` + SaveCacheFailed int64 `db:"save_cache_failed"` + IndexFailed int64 `db:"index_failed"` + } + + err := a.deps.DB.Select(ctx, &c, query, mk20.ProductNamePDPV1) + if err != nil { + return nil, xerrors.Errorf("failed to run failed task query: %w", err) + } + + counts := c[0] + + return &MK20PDPPipelineFailedStats{ + DownloadingFailed: counts.DownloadingFailed, + CommPFailed: counts.CommPFailed, + AggFailed: counts.AggFailed, + AddPieceFailed: counts.AddPieceFailed, + SaveCacheFailed: counts.SaveCacheFailed, + IndexFailed: counts.IndexFailed, + }, nil +} + +func (a *WebRPC) MK20BulkRestartFailedPDPTasks(ctx context.Context, taskType string) error { + didCommit, err := a.deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { + var rows *harmonydb.Query + var err error + + switch taskType { + case "downloading": + rows, err = tx.Query(` + SELECT + t.task_id + FROM pdp_pipeline dp + LEFT JOIN market_mk20_download_pipeline mdp + ON mdp.id = dp.id + AND mdp.piece_cid_v2 = dp.piece_cid_v2 + AND mdp.product = $1 + LEFT JOIN LATERAL ( + SELECT pp.task_id + FROM unnest(mdp.ref_ids) AS r(ref_id) + JOIN parked_piece_refs pr ON pr.ref_id = r.ref_id + JOIN parked_pieces pp ON pp.id = pr.piece_id + WHERE pp.complete = FALSE + LIMIT 1 + ) AS t ON TRUE + LEFT JOIN harmony_task h ON h.id = t.task_id + WHERE dp.downloaded = FALSE + AND h.id IS NULL; + `, mk20.ProductNamePDPV1) + case "commp": + rows, err = tx.Query(` + SELECT dp.commp_task_id + FROM pdp_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.commp_task_id + WHERE dp.complete = false + AND dp.downloaded = true + AND dp.commp_task_id IS NOT NULL + AND dp.after_commp = false + AND h.id IS NULL + `) + case "aggregate": + rows, err = tx.Query(` + SELECT dp.agg_task_id + FROM pdp_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.agg_task_id + WHERE dp.complete = false + AND dp.after_commp = true + AND dp.agg_task_id IS NOT NULL + AND dp.aggregated = false + AND h.id IS NULL + `) + case "add_piece": + rows, err = tx.Query(` + SELECT dp.add_piece_task_id + FROM pdp_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.add_piece_task_id + WHERE dp.complete = false + AND dp.aggregated = true + AND dp.add_piece_task_id IS NOT NULL + AND dp.after_add_piece = false + AND h.id IS NULL + `) + case "save_cache": + rows, err = tx.Query(` + SELECT dp.save_cache_task_id + FROM pdp_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.save_cache_task_id + WHERE dp.complete = false + AND dp.after_add_piece = true + AND dp.after_add_piece_msg = true + AND dp.save_cache_task_id IS NOT NULL + AND dp.after_save_cache = false + AND h.id IS NULL + `) + case "index": + rows, err = tx.Query(` + SELECT dp.indexing_task_id + FROM pdp_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.indexing_task_id + WHERE dp.complete = false + AND dp.indexing_task_id IS NOT NULL + AND dp.after_save_cache = true + AND h.id IS NULL + `) + default: + return false, fmt.Errorf("unknown task type: %s", taskType) + } + + if err != nil { + return false, fmt.Errorf("failed to query failed tasks: %w", err) + } + defer rows.Close() + + var taskIDs []int64 + for rows.Next() { + var tid int64 + if err := rows.Scan(&tid); err != nil { + return false, fmt.Errorf("failed to scan task_id: %w", err) + } + taskIDs = append(taskIDs, tid) + } + + if err := rows.Err(); err != nil { + return false, fmt.Errorf("row iteration error: %w", err) + } + + for _, taskID := range taskIDs { + var name string + var posted time.Time + var result bool + err = tx.QueryRow(` + SELECT name, posted, result + FROM harmony_task_history + WHERE task_id = $1 + ORDER BY id DESC LIMIT 1 + `, taskID).Scan(&name, &posted, &result) + if errors.Is(err, pgx.ErrNoRows) { + // No history means can't restart this task + continue + } else if err != nil { + return false, fmt.Errorf("failed to query history: %w", err) + } + + // If result=true means the task ended successfully, no restart needed + if result { + continue + } + + log.Infow("restarting task", "task_id", taskID, "name", name) + + _, err = tx.Exec(` + INSERT INTO harmony_task (id, initiated_by, update_time, posted_time, owner_id, added_by, previous_task, name) + VALUES ($1, NULL, NOW(), $2, NULL, $3, NULL, $4) + `, taskID, posted, a.deps.MachineID, name) + if err != nil { + return false, fmt.Errorf("failed to insert harmony_task for task_id %d: %w", taskID, err) + } + } + + // All done successfully, commit the transaction + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return err + } + if !didCommit { + return fmt.Errorf("transaction did not commit") + } + + return nil +} + +func (a *WebRPC) MK20BulkRemoveFailedPDPPipelines(ctx context.Context, taskType string) error { + didCommit, err := a.deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { + var rows *harmonydb.Query + var err error + + // We'll select pipeline fields directly based on the stage conditions + switch taskType { + case "downloading": + rows, err = tx.Query(` + SELECT + dp.id, + dp.piece_ref, + dp.commp_task_id, + dp.agg_task_id, + dp.add_piece_task_id, + dp.save_cache_task_id, + dp.indexing_task_id + FROM pdp_pipeline dp + LEFT JOIN market_mk20_download_pipeline mdp + ON mdp.id = dp.id + AND mdp.piece_cid_v2 = dp.piece_cid_v2 + AND mdp.product = $1 + LEFT JOIN LATERAL ( + SELECT pp.task_id + FROM unnest(mdp.ref_ids) AS r(ref_id) + JOIN parked_piece_refs pr ON pr.ref_id = r.ref_id + JOIN parked_pieces pp ON pp.id = pr.piece_id + WHERE pp.task_id IS NOT NULL + LIMIT 1 + ) t ON TRUE + LEFT JOIN harmony_task h ON h.id = t.task_id + WHERE dp.complete = FALSE + AND dp.downloaded = FALSE + AND t.task_id IS NOT NULL + AND h.id IS NULL; + `, mk20.ProductNamePDPV1) + case "commp": + rows, err = tx.Query(` + SELECT dp.id, dp.piece_ref, dp.commp_task_id, dp.agg_task_id, dp.add_piece_task_id, dp.save_cache_task_id, dp.indexing_task_id + FROM pdp_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.commp_task_id + WHERE dp.complete = false + AND dp.downloaded = true + AND dp.commp_task_id IS NOT NULL + AND dp.after_commp = false + AND h.id IS NULL + `) + case "aggregate": + rows, err = tx.Query(` + SELECT dp.id, dp.piece_ref, dp.commp_task_id, dp.agg_task_id, dp.add_piece_task_id, dp.save_cache_task_id, dp.indexing_task_id + FROM pdp_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.agg_task_id + WHERE dp.complete = false + AND after_commp = true + AND dp.agg_task_id IS NOT NULL + AND dp.aggregated = false + AND h.id IS NULL + `) + case "add_piece": + rows, err = tx.Query(` + SELECT dp.id, dp.piece_ref, dp.commp_task_id, dp.agg_task_id, dp.add_piece_task_id, dp.save_cache_task_id, dp.indexing_task_id + FROM pdp_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.agg_task_id + WHERE dp.complete = false + AND aggregated = true + AND dp.add_piece_task_id IS NOT NULL + AND dp.after_add_piece = false + AND h.id IS NULL + `) + case "save_cache": + rows, err = tx.Query(` + SELECT dp.id, dp.piece_ref, dp.commp_task_id, dp.agg_task_id, dp.add_piece_task_id, dp.save_cache_task_id, dp.indexing_task_id + FROM pdp_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.agg_task_id + WHERE dp.complete = false + AND after_add_piece = true + AND after_add_piece_msg = true + AND dp.save_cache_task_id IS NOT NULL + AND dp.after_save_cache = false + AND h.id IS NULL + `) + case "index": + rows, err = tx.Query(` + SELECT dp.id, dp.piece_ref, dp.commp_task_id, dp.agg_task_id, dp.add_piece_task_id, dp.save_cache_task_id, dp.indexing_task_id + FROM pdp_pipeline dp + LEFT JOIN harmony_task h ON h.id = dp.indexing_task_id + WHERE dp.complete = false + AND after_save_cache = true + AND dp.indexing_task_id IS NOT NULL + AND h.id IS NULL + `) + default: + return false, fmt.Errorf("unknown task type: %s", taskType) + } + + if err != nil { + return false, fmt.Errorf("failed to query failed pipelines: %w", err) + } + defer rows.Close() + + type pipelineInfo struct { + id string + refID sql.NullInt64 + commpTaskID sql.NullInt64 + aggTaskID sql.NullInt64 + addPieceTaskID sql.NullInt64 + saveCacheTask sql.NullInt64 + indexingTaskID sql.NullInt64 + } + + var pipelines []pipelineInfo + for rows.Next() { + var p pipelineInfo + if err := rows.Scan(&p.id, &p.refID, &p.commpTaskID, &p.aggTaskID, &p.addPieceTaskID, &p.saveCacheTask, &p.indexingTaskID); err != nil { + return false, fmt.Errorf("failed to scan pdp pipeline info: %w", err) + } + pipelines = append(pipelines, p) + } + if err := rows.Err(); err != nil { + return false, fmt.Errorf("row iteration error: %w", err) + } + + for _, p := range pipelines { + // Gather task IDs + var taskIDs []int64 + if p.commpTaskID.Valid { + taskIDs = append(taskIDs, p.commpTaskID.Int64) + } + if p.aggTaskID.Valid { + taskIDs = append(taskIDs, p.aggTaskID.Int64) + } + if p.addPieceTaskID.Valid { + taskIDs = append(taskIDs, p.addPieceTaskID.Int64) + } + if p.saveCacheTask.Valid { + taskIDs = append(taskIDs, p.saveCacheTask.Int64) + } + if p.indexingTaskID.Valid { + taskIDs = append(taskIDs, p.indexingTaskID.Int64) + } + + if len(taskIDs) > 0 { + var runningTasks int + err = tx.QueryRow(`SELECT COUNT(*) FROM harmony_task WHERE id = ANY($1)`, taskIDs).Scan(&runningTasks) + if err != nil { + return false, err + } + if runningTasks > 0 { + // This should not happen if they are failed, but just in case + return false, fmt.Errorf("cannot remove deal pipeline %s: tasks are still running", p.id) + } + } + + n, err := tx.Exec(`UPDATE market_mk20_deal + SET pdp_v1 = jsonb_set( + jsonb_set(pdp_v1, '{error}', to_jsonb($1::text), true), + '{complete}', to_jsonb(true), true + ) + WHERE id = $2;`, "Transaction failed", p.id) // TODO: Add Correct error + + if err != nil { + return false, xerrors.Errorf("failed to update market_mk20_deal: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + + _, err = tx.Exec(`DELETE FROM pdp_pipeline WHERE id = $1`, p.id) + if err != nil { + return false, xerrors.Errorf("failed to clean up pdp pipeline: %w", err) + } + + if p.refID.Valid { + _, err = tx.Exec(`DELETE FROM parked_piece_refs WHERE ref_id = $1`, p.refID.Int64) + if err != nil { + return false, fmt.Errorf("failed to remove parked_piece_refs for pipeline %s: %w", p.id, err) + } + } + + log.Infow("removed failed PDP pipeline", "id", p.id) + } + + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return err + } + if !didCommit { + return fmt.Errorf("transaction did not commit") + } + + return nil +} + +func (a *WebRPC) MK20PDPPipelineRemove(ctx context.Context, id string) error { + _, err := ulid.Parse(id) + if err != nil { + return err + } + + _, err = a.deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var pipelines []struct { + Ref sql.NullInt64 `db:"piece_ref"` + + CommpTaskID sql.NullInt64 `db:"commp_task_id"` + AggrTaskID sql.NullInt64 `db:"agg_task_id"` + AddPieceTaskID sql.NullInt64 `db:"add_piece_task_id"` + SaveCacheTask sql.NullInt64 `db:"save_cache_task"` + IndexingTaskID sql.NullInt64 `db:"indexing_task_id"` + } + + err = tx.Select(&pipelines, `SELECT piece_ref, sector, commp_task_id, agg_task_id, indexing_task_id + FROM market_mk20_pipeline WHERE id = $1`, id) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return false, fmt.Errorf("no deal pipeline found with id %s", id) + } + return false, err + } + + if len(pipelines) == 0 { + return false, fmt.Errorf("no deal pipeline found with id %s", id) + } + + // Collect non-null task IDs + var taskIDs []int64 + for _, pipeline := range pipelines { + if pipeline.CommpTaskID.Valid { + taskIDs = append(taskIDs, pipeline.CommpTaskID.Int64) + } + if pipeline.AggrTaskID.Valid { + taskIDs = append(taskIDs, pipeline.AggrTaskID.Int64) + } + if pipeline.AddPieceTaskID.Valid { + taskIDs = append(taskIDs, pipeline.AddPieceTaskID.Int64) + } + if pipeline.SaveCacheTask.Valid { + taskIDs = append(taskIDs, pipeline.SaveCacheTask.Int64) + } + if pipeline.IndexingTaskID.Valid { + taskIDs = append(taskIDs, pipeline.IndexingTaskID.Int64) + } + } + + // Check if any tasks are still running + if len(taskIDs) > 0 { + var runningTasks int + err = tx.QueryRow(`SELECT COUNT(*) FROM harmony_task WHERE id = ANY($1)`, taskIDs).Scan(&runningTasks) + if err != nil { + return false, err + } + if runningTasks > 0 { + return false, fmt.Errorf("cannot remove deal pipeline %s: tasks are still running", id) + } + } + + n, err := tx.Exec(`UPDATE market_mk20_deal + SET pdp_v1 = jsonb_set( + jsonb_set(pdp_v1, '{error}', to_jsonb($1::text), true), + '{complete}', to_jsonb(true), true + ) + WHERE id = $2;`, "Transaction failed", id) // TODO: Add Correct error + + if err != nil { + return false, xerrors.Errorf("failed to update market_mk20_deal: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("expected 1 row to be updated, got %d", n) + } + + _, err = tx.Exec(`DELETE FROM pdp_pipeline WHERE id = $1`, id) + if err != nil { + return false, xerrors.Errorf("failed to clean up pdp pipeline: %w", err) + } + + for _, pipeline := range pipelines { + if pipeline.Ref.Valid { + _, err = tx.Exec(`DELETE FROM parked_piece_refs WHERE ref_id = $1`, pipeline.Ref) + if err != nil { + return false, fmt.Errorf("failed to remove parked_piece_refs for pipeline %s: %w", id, err) + } + } + } + + return true, nil + }, harmonydb.OptionRetry()) + return err +} diff --git a/web/api/webrpc/pdp.go b/web/api/webrpc/pdp.go index c6c2f99a1..6bb9aa51c 100644 --- a/web/api/webrpc/pdp.go +++ b/web/api/webrpc/pdp.go @@ -11,7 +11,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/yugabyte/pgx/v5" - xerrors "golang.org/x/xerrors" + "golang.org/x/xerrors" "github.com/filecoin-project/curio/harmony/harmonydb" ) @@ -164,12 +164,14 @@ func (a *WebRPC) ImportPDPKey(ctx context.Context, hexPrivateKey string) (string // Insert into the database within a transaction _, err = a.deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { // Check if the owner_address already exists - var existingAddress string - err := tx.QueryRow(`SELECT address FROM eth_keys WHERE address = $1 AND role = 'pdp'`, address).Scan(&existingAddress) - if err == nil { + var existingAddress bool + + err := tx.QueryRow(`SELECT EXISTS(SELECT 1 FROM eth_keys WHERE role = 'pdp')`).Scan(&existingAddress) + if err != nil { + return false, xerrors.Errorf("failed to check existing owner address: %v", err) + } + if existingAddress { return false, fmt.Errorf("owner address %s already exists", address) - } else if err != pgx.ErrNoRows { - return false, fmt.Errorf("failed to check existing owner address: %v", err) } // Insert the new owner address and private key diff --git a/web/api/webrpc/sector.go b/web/api/webrpc/sector.go index 2078a3aa7..6d96cf0dc 100644 --- a/web/api/webrpc/sector.go +++ b/web/api/webrpc/sector.go @@ -7,6 +7,7 @@ import ( "time" "github.com/docker/go-units" + "github.com/ipfs/go-cid" "github.com/samber/lo" "github.com/snadrus/must" "golang.org/x/xerrors" @@ -15,6 +16,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/curio/lib/commcidv2" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/storiface" @@ -115,6 +117,7 @@ type SectorPieceMeta struct { PieceIndex int64 `db:"piece_index"` PieceCid string `db:"piece_cid"` PieceSize int64 `db:"piece_size"` + PieceCidV2 string `db:"-"` DealID *string `db:"deal_id"` DataUrl *string `db:"data_url"` @@ -513,6 +516,20 @@ func (a *WebRPC) SectorInfo(ctx context.Context, sp string, intid int64) (*Secto pieces[i].StrPieceSize = types.SizeStr(types.NewInt(uint64(pieces[i].PieceSize))) pieces[i].StrDataRawSize = types.SizeStr(types.NewInt(uint64(derefOrZero(pieces[i].DataRawSize)))) + pcid, err := cid.Parse(pieces[i].PieceCid) + if err != nil { + return nil, xerrors.Errorf("failed to parse piece cid: %w", err) + } + + if pieces[i].DataRawSize != nil { + pcid2, err := commcidv2.PieceCidV2FromV1(pcid, uint64(*pieces[i].DataRawSize)) + if err != nil { + return nil, xerrors.Errorf("failed to generate piece cid v2: %w", err) + } + + pieces[i].PieceCidV2 = pcid2.String() + } + id, isPiecePark := strings.CutPrefix(derefOrZero(pieces[i].DataUrl), "pieceref:") if !isPiecePark { continue diff --git a/web/static/pages/actor/actor-detail.mjs b/web/static/pages/actor/actor-detail.mjs index 63b57692f..3c4247294 100644 --- a/web/static/pages/actor/actor-detail.mjs +++ b/web/static/pages/actor/actor-detail.mjs @@ -99,7 +99,9 @@ customElements.define('actor-detail', class Actor extends LitElement { Source Config Layers: - ${actorInfo.Summary.CLayers} + + ${actorInfo.Summary.CLayers.map(layer => html`${layer} `)} + Sector Size: @@ -388,6 +390,7 @@ class ActorCharts extends LitElement { const firstAll = this.data.All[0]?.BucketEpoch ?? Infinity; const firstCC = this.data.CC[0]?.BucketEpoch ?? Infinity; const nowEpoch = Math.min(firstAll, firstCC); + const blockDelaySeconds = this.data.BlockDelaySeconds // --------------------------- // 1) EXPIRATION CHART (All vs. CC) @@ -423,7 +426,7 @@ class ActorCharts extends LitElement { }, ], }, - options: this.createChartOptions('Expiration (Count)', 'Count', nowEpoch, allExpData, ccExpData), + options: this.createChartOptions('Expiration (Count)', 'Count', nowEpoch, blockDelaySeconds, allExpData, ccExpData), }; if (!this.chartExpiration) { @@ -455,14 +458,14 @@ class ActorCharts extends LitElement { borderColor: 'rgb(255, 205, 86)', backgroundColor: 'rgba(255, 205, 86, 0.2)', borderWidth: 1, - stepped: true, + stepped: 'after', fill: true, pointRadius: 2, data: allQAPData, }, ], }, - options: this.createChartOptions('Quality-Adjusted Power', 'QAP', nowEpoch, allQAPData), + options: this.createChartOptions('Quality-Adjusted Power', 'QAP', nowEpoch, blockDelaySeconds, allQAPData), }; if (!this.chartQAP) { @@ -494,7 +497,7 @@ class ActorCharts extends LitElement { borderColor: 'rgb(153, 102, 255)', backgroundColor: 'rgba(153, 102, 255, 0.2)', borderWidth: 1, - stepped: true, + stepped: 'after', fill: true, pointRadius: 2, data: allLockedData, @@ -505,6 +508,7 @@ class ActorCharts extends LitElement { 'Vesting Locked Funds', 'Locked Funds (FIL)', nowEpoch, + blockDelaySeconds, allLockedData ), }; @@ -525,10 +529,11 @@ class ActorCharts extends LitElement { * @param {string} chartTitle - The chart title * @param {string} yTitle - Label for Y axis * @param {number} nowEpoch - The earliest epoch we consider "current" + * @param {number} blockDelaySeconds - The BlockDelaySeconds for the build * @param {Array} allData - The data array for the "All" set * @param {Array} [ccData] - Optional data array for the "CC" set */ - createChartOptions(chartTitle, yTitle, nowEpoch, allData, ccData = []) { + createChartOptions(chartTitle, yTitle, nowEpoch, blockDelaySeconds, allData, ccData = []) { return { responsive: true, maintainAspectRatio: false, @@ -544,12 +549,12 @@ class ActorCharts extends LitElement { callbacks: { label: (context) => { const epochVal = context.parsed.x; - const daysOffset = Math.round(((epochVal - nowEpoch) * 30) / 86400); + const daysOffset = Math.round(((epochVal - nowEpoch) * blockDelaySeconds) / 86400); const months = (daysOffset / 30).toFixed(1); let value; if (yTitle === 'QAP') { - value = this.toHumanBytes(context.parsed.y); // For QAP + value = toHumanBytes(context.parsed.y); // For QAP } else if (yTitle === 'Locked Funds (FIL)') { value = this.toHumanFIL(context.parsed.y); // For Vesting } else { @@ -574,7 +579,7 @@ class ActorCharts extends LitElement { }, ticks: { callback: (value) => { - const days = Math.round(((value - nowEpoch) * 30) / 86400); + const days = Math.round(((value - nowEpoch) * blockDelaySeconds) / 86400); return days + 'd'; }, font: { diff --git a/web/static/pages/ipni/ipni_search.mjs b/web/static/pages/ipni/ipni_search.mjs index 7f26412dd..a645fe07e 100644 --- a/web/static/pages/ipni/ipni_search.mjs +++ b/web/static/pages/ipni/ipni_search.mjs @@ -167,7 +167,7 @@ class IpniSearch extends LitElement { Piece CID - ${this.adData.piece_cid} + ${this.adData.piece_cid} Piece Size diff --git a/web/static/pages/market-settings/allow-list.mjs b/web/static/pages/market-settings/allow-list.mjs index 08a1b9c13..4edc87cd3 100644 --- a/web/static/pages/market-settings/allow-list.mjs +++ b/web/static/pages/market-settings/allow-list.mjs @@ -25,7 +25,6 @@ class AllowList extends LitElement { if (Array.isArray(result)) { this.allowList = result; } else { - console.error('GetAllowDenyList did not return an array:', result); this.allowList = []; } } catch (error) { diff --git a/web/static/pages/market/index.html b/web/static/pages/market/index.html index 659815ace..98f8e81de 100644 --- a/web/static/pages/market/index.html +++ b/web/static/pages/market/index.html @@ -4,8 +4,6 @@ Storage Market - - @@ -29,13 +27,6 @@

Storage Market

-
-
-
- -
-
-
@@ -43,13 +34,6 @@

Storage Market

-
-
-
- -
-
-
diff --git a/web/static/pages/market/pending-deals.mjs b/web/static/pages/market/pending-deals.mjs index f22f6e558..cdac22c5e 100644 --- a/web/static/pages/market/pending-deals.mjs +++ b/web/static/pages/market/pending-deals.mjs @@ -58,7 +58,7 @@ class PendingDeals extends LitElement { ${entry.Miner} ${entry.SectorNumber} - ${entry.PieceCID} + ${entry.PieceCID} ${entry.PieceSizeStr} ${entry.CreatedAtStr} diff --git a/web/static/pages/mk12-deal/deal.mjs b/web/static/pages/mk12-deal/deal.mjs index 5d8d3fb26..3ad8a6e48 100644 --- a/web/static/pages/mk12-deal/deal.mjs +++ b/web/static/pages/mk12-deal/deal.mjs @@ -33,17 +33,21 @@ class DealDetails extends LitElement { {property: 'Signed Proposal Cid', value: entry.signed_proposal_cid}, {property: 'Offline', value: entry.offline}, {property: 'Verified', value: entry.verified}, - {property: 'Is Legacy', value: entry.is_legacy}, {property: 'Is DDO', value: entry.is_ddo}, {property: 'Start Epoch', value: html``}, {property: 'End Epoch', value: html``}, {property: 'Client Peer ID', value: html``}, {property: 'Chain Deal ID', value: entry.chain_deal_id}, {property: 'Publish CID', value: entry.publish_cid}, - {property: 'Piece CID', value: html`${entry.piece_cid}`}, + {property: 'Piece CID', value: entry.piece_cid}, + {property: 'Piece CID V2', value: entry.piece_cid_v2 && entry.piece_cid_v2.trim() !== '' + ? html`${entry.piece_cid_v2}` + : 'N/A'}, {property: 'Piece Size', value: entry.piece_size}, + {property: 'Raw Size', value: entry.raw_size || 'N/A'}, {property: 'Fast Retrieval', value: entry.fast_retrieval}, {property: 'Announce To IPNI', value: entry.announce_to_ipni}, + {property: 'Indexed', value: entry.indexed ? 'Yes' : 'No'}, {property: 'Url', value: entry.url}, {property: 'Url Headers', value: html`
diff --git a/web/static/pages/mk12-deal/index.html b/web/static/pages/mk12-deal/index.html index 5f63a80b5..6d03731eb 100644 --- a/web/static/pages/mk12-deal/index.html +++ b/web/static/pages/mk12-deal/index.html @@ -17,7 +17,7 @@
-

Deal Info

+

MK12 Deal Info

diff --git a/web/static/pages/market/deal-pipelines.mjs b/web/static/pages/mk12-deals/deal-pipelines.mjs similarity index 94% rename from web/static/pages/market/deal-pipelines.mjs rename to web/static/pages/mk12-deals/deal-pipelines.mjs index c5b09f63a..9f1fc392e 100644 --- a/web/static/pages/market/deal-pipelines.mjs +++ b/web/static/pages/mk12-deals/deal-pipelines.mjs @@ -40,11 +40,11 @@ class DealPipelines extends LitElement { async loadData() { try { const params = [this.limit, this.offset]; - const deals = await RPCCall('GetDealPipelines', params); - this.deals = deals; + const deals = await RPCCall('GetMK12DealPipelines', params); + this.deals = deals || []; // Load failed tasks data - const failed = await RPCCall('PipelineFailedTasksMarket', []); + const failed = await RPCCall('MK12PipelineFailedTasks', []); this.failedTasks = failed || {}; this.requestUpdate(); @@ -135,7 +135,7 @@ class DealPipelines extends LitElement { this.requestUpdate(); try { - await RPCCall('BulkRestartFailedMarketTasks', [type]); + await RPCCall('MK12BulkRestartFailedMarketTasks', [type]); await this.loadData(); } catch (err) { console.error('Failed to restart tasks:', err); @@ -152,7 +152,7 @@ class DealPipelines extends LitElement { this.requestUpdate(); try { - await RPCCall('BulkRemoveFailedMarketPipelines', [type]); + await RPCCall('MK12BulkRemoveFailedMarketPipelines', [type]); await this.loadData(); } catch (err) { console.error('Failed to remove pipelines:', err); @@ -224,7 +224,10 @@ class DealPipelines extends LitElement { ${deal.miner} - ${this.formatPieceCid(deal.piece_cid)} + ${deal.piece_cid_v2 && deal.piece_cid_v2 !== "" + ? html`${this.formatPieceCid(deal.piece_cid_v2)}` + : html`${this.formatPieceCid(deal.piece_cid)}` + } ${this.formatBytes(deal.piece_size)} ${this.getDealStatus(deal)} diff --git a/web/static/pages/mk12-deals/index.html b/web/static/pages/mk12-deals/index.html index 5f65ce17b..d4c3c1f27 100644 --- a/web/static/pages/mk12-deals/index.html +++ b/web/static/pages/mk12-deals/index.html @@ -1,12 +1,13 @@ - Storage Marker + MK12 Storage Deals + + - @@ -22,24 +23,31 @@

Storage Deals

-
+
- + +
+
+
+
+
+
+
- +
- +
diff --git a/web/static/pages/market/market-asks.mjs b/web/static/pages/mk12-deals/market-asks.mjs similarity index 83% rename from web/static/pages/market/market-asks.mjs rename to web/static/pages/mk12-deals/market-asks.mjs index 670b995df..70d926260 100644 --- a/web/static/pages/market/market-asks.mjs +++ b/web/static/pages/mk12-deals/market-asks.mjs @@ -161,56 +161,56 @@ class MarketAsks extends LitElement { render() { return html` - - - -
-

Storage Asks

- - - - - - - - - - - - - - - - ${this.actorList.map((spID) => { - const ask = this.spAsks.get(spID); - return html` - - - - - - - - - - - - `; - })} - -
SP IDPrice (FIL/TiB/Month)Price (attoFIL/GiB/Epoch)Verified Price (FIL/TiB/Month)Verified Price (attoFIL/GiB/Epoch)Min SizeMax SizeSequenceActions
${ask ? ask.Miner : ''}${ask ? this.attoFilToFilPerTiBPerMonth(ask.Price) : '-'}${ask ? ask.Price : '-'}${ask ? this.attoFilToFilPerTiBPerMonth(ask.VerifiedPrice) : '-'}${ask ? ask.VerifiedPrice : '-'}${ask ? this.formatBytes(ask.MinSize) : '-'}${ask ? this.formatBytes(ask.MaxSize) : '-'}${ask ? ask.Sequence : '-'} - -
- ${this.updatingSpID !== null ? this.renderUpdateForm() : ''} -
- `; + + + +
+

Storage Asks

+ + + + + + + + + + + + + + + + ${this.actorList.map((spID) => { + const ask = this.spAsks.get(spID); + return html` + + + + + + + + + + + + `; + })} + +
SP IDPrice (FIL/TiB/Month)Price (attoFIL/GiB/Epoch)Verified Price (FIL/TiB/Month)Verified Price (attoFIL/GiB/Epoch)Min SizeMax SizeSequenceActions
${ask ? ask.Miner : ''}${ask ? this.attoFilToFilPerTiBPerMonth(ask.Price) : '-'}${ask ? ask.Price : '-'}${ask ? this.attoFilToFilPerTiBPerMonth(ask.VerifiedPrice) : '-'}${ask ? ask.VerifiedPrice : '-'}${ask ? this.formatBytes(ask.MinSize) : '-'}${ask ? this.formatBytes(ask.MaxSize) : '-'}${ask ? ask.Sequence : '-'} + +
+ ${this.updatingSpID !== null ? this.renderUpdateForm() : ''} +
+ `; } renderUpdateForm() { diff --git a/web/static/pages/mk12-deals/mk12-deals.mjs b/web/static/pages/mk12-deals/mk12-deals.mjs index 74b502525..bcf0bf1ee 100644 --- a/web/static/pages/mk12-deals/mk12-deals.mjs +++ b/web/static/pages/mk12-deals/mk12-deals.mjs @@ -89,13 +89,18 @@ class MK12DealList extends LitElement { ${this.deals.map( (deal) => html` - ${formatDate(deal.created_at)} - ${deal.id} - ${deal.miner} - ${deal.piece_cid} - ${this.formatBytes(deal.piece_size)} - - + ${formatDate(deal.created_at)} + ${deal.id} + ${deal.miner} + + ${deal.piece_cid_v2 && deal.piece_cid_v2 !== "" + ? html`${this.formatPieceCid(deal.piece_cid_v2)}` + : html`${this.formatPieceCid(deal.piece_cid)}` + } + + ${this.formatBytes(deal.piece_size)} + + ` )} @@ -133,6 +138,16 @@ class MK12DealList extends LitElement { } } + formatPieceCid(pieceCid) { + if (!pieceCid) return ''; + if (pieceCid.length <= 24) { + return pieceCid; + } + const start = pieceCid.substring(0, 16); + const end = pieceCid.substring(pieceCid.length - 8); + return `${start}...${end}`; + } + static styles = css` .pagination-controls { display: flex; diff --git a/web/static/pages/mk12-deals/mk12ddo-list.mjs b/web/static/pages/mk12-deals/mk12ddo-list.mjs index 1ccf283a3..609c8e4a9 100644 --- a/web/static/pages/mk12-deals/mk12ddo-list.mjs +++ b/web/static/pages/mk12-deals/mk12ddo-list.mjs @@ -82,20 +82,24 @@ class MK12DDODealList extends LitElement { Piece Size Processed Error - ${this.deals.map( (deal) => html` - ${formatDate(deal.created_at)} - ${deal.id} - ${deal.miner} - ${deal.piece_cid} - ${this.formatBytes(deal.piece_size)} - - + ${formatDate(deal.created_at)} + ${deal.id} + ${deal.miner} + + ${deal.piece_cid_v2 && deal.piece_cid_v2 !== "" + ? html`${this.formatPieceCid(deal.piece_cid_v2)}` + : html`${this.formatPieceCid(deal.piece_cid)}` + } + + ${this.formatBytes(deal.piece_size)} + + ` )} @@ -133,6 +137,16 @@ class MK12DDODealList extends LitElement { } } + formatPieceCid(pieceCid) { + if (!pieceCid) return ''; + if (pieceCid.length <= 24) { + return pieceCid; + } + const start = pieceCid.substring(0, 16); + const end = pieceCid.substring(pieceCid.length - 8); + return `${start}...${end}`; + } + static styles = css` .pagination-controls { display: flex; diff --git a/web/static/pages/mk20-deal/deal.mjs b/web/static/pages/mk20-deal/deal.mjs new file mode 100644 index 000000000..f0db5906f --- /dev/null +++ b/web/static/pages/mk20-deal/deal.mjs @@ -0,0 +1,262 @@ +import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; +import RPCCall from '/lib/jsonrpc.mjs'; +import { formatDate } from '/lib/dateutil.mjs'; +import '/ux/epoch.mjs'; +import '/lib/cu-wallet.mjs'; +import '/ux/yesno.mjs'; + +class DealDetails extends LitElement { + constructor() { + super(); + this.loaddata(); + } + + createRenderRoot() { + return this; // Render into light DOM instead of shadow DOM + } + + + async loaddata() { + try { + const params = new URLSearchParams(window.location.search); + this.data = await RPCCall('MK20DDOStorageDeal', [params.get('id')]); + this.requestUpdate(); + } catch (error) { + console.error('Failed to load deal details:', error); + alert(`Failed to load deal details: ${error.message}`); + } + } + + render() { + if (!this.data) return html`

No data.

`; + + const { identifier, client, data, products, error } = this.data.deal; + + + return html` + + + + + + + + + + + +
Identifier${identifier}
Client
Error
PieceCID + ${data + ? html`${data.piece_cid['/']}` + : "Not Available"} +
+ +

Piece Format

+ ${this.renderPieceFormat(data?.format)} + +

Data Source

+ + + + + + + ${this.renderDataSource(data, identifier)} + +
NameDetails
+ + ${products?.ddo_v1 ? this.renderDDOV1(products.ddo_v1) : ''} + ${products?.pdp_v1 ? this.renderPDPV1(products.pdp_v1) : ''} + ${products?.retrieval_v1 ? this.renderRetV1(products.retrieval_v1) : ''} + `; + } + + renderDataSource(data, id){ + if (!data) return ''; + if (data.source_http) { + return html` + + HTTP + ${data?.source_http ? this.renderSourceHTTP(data.source_http) : ''} + + ` + } + if (data.source_aggregate) { + return html` + + Aggregate + ${data?.source_aggregate ? this.renderSourceAggregate(data.source_aggregate) : ''} + + ` + } + if (data.source_offline) { + return html` + + Offline + ${data?.source_offline ? this.renderSourceOffline(data.source_offline) : ''} + + ` + } + if (data.source_httpput) { + return html` + + HTTP Put + ${data?.source_httpput ? this.renderSourceHttpPut(data.source_httpput) : ''} + + ` + } + } + + renderPieceFormat(format) { + if (!format) return ''; + return html` + + + + + + + ${format.car ? html`` : ''} + ${format.aggregate + ? html` + + ` + : ''} + ${format.raw ? html`` : ''} + +
Format NameDetails
Car
AggregateType ${format.aggregate.type}
Raw
+ `; + } + + renderSourceHTTP(src) { + return html` + + + +
Raw Size${src.rawsize}
${src.urls ? this.renderUrls(src.urls) : ''}
+ `; + } + + renderUrls(urls) { + if (!urls?.length) return ''; + return html` + + + + + + + + + ${urls.map(u => html` + + + ${u.priority} + + + `)} + +
URLHeadersPriorityFallback
${u.url} +
+ [SHOW] +
${JSON.stringify(u.headers, null, 2)}
+
+
${u.fallback}
+ ` + } + + renderSourceAggregate(src) { + return html` +
+ [Aggregate Details] +
+ ${src.pieces.map((piece, i) => html` +
+

+ +

+
+
+
    +
  • PieceCID: ${piece.piece_cid['/']}
  • +
  • ${this.renderPieceFormat(piece.format)}
  • +
  • ${this.renderDataSource(piece)}
  • +
+
+
+
+ `)} +
+
+ `; + } + + renderSourceOffline(src) { + return html` + + +
Raw Size${src.raw_size}
+ `; + } + + renderSourceHttpPut(src) { + return html` + + +
Raw Size${src.raw_size}
+ `; + } + + renderDDOV1(ddo) { + if (!ddo) return ''; + return html` +
DDO v1
+ + + + + ${ddo.allocation_id ? html`` : ''} + + + +
Provider${ddo.provider}
Piece Manager
Duration${ddo.duration}
Allocation ID${ddo.allocation_id}
Contract${ddo.contract_address}
Verify Method${ddo.contract_verify_method}
Notify Address${ddo.notification_address}
+ `; + } + + renderPDPV1(pdp) { + if (!pdp) return ''; + return html` +
PDP V1
+ + + + + + + ${pdp.data_set_id ? html`` : ``} + ${pdp.piece_ids ? html`` : ``} +
Create DataSet
Create Piece
Remove Piece
Remove DataSet
Record Keeper${pdp.record_keeper}>
DataSet ID${pdp.data_set_id}
Piece IDs${pdp.piece_ids}
+ `; + } + + renderRetV1(ret) { + if (!ret) return ''; + return html` +
Retrieval v1
+ + + + +
Indexing${ret.indexing ? 'Yes' : 'No'}
Announce Piece to IPNI${ret.announce_payload ? 'Yes' : 'No'}
Announce Payload to IPNI${ret.announce_payload ? 'Yes' : 'No'}
+ `; + } +} +customElements.define('deal-details', DealDetails); + diff --git a/web/static/pages/mk20-deal/index.html b/web/static/pages/mk20-deal/index.html new file mode 100644 index 000000000..6a8cefe55 --- /dev/null +++ b/web/static/pages/mk20-deal/index.html @@ -0,0 +1,43 @@ + + + + + Deals + + + + + + + + + +
+
+
+
+

MK20 Deal Info

+
+
+
+
+
+
+ +
+
+
+
+
+
+ +
+
+
+
+
+ + + \ No newline at end of file diff --git a/web/static/pages/mk20/ddo-pipeline.mjs b/web/static/pages/mk20/ddo-pipeline.mjs new file mode 100644 index 000000000..c1ce081a3 --- /dev/null +++ b/web/static/pages/mk20/ddo-pipeline.mjs @@ -0,0 +1,374 @@ +import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; +import RPCCall from '/lib/jsonrpc.mjs'; +import { formatDate } from '/lib/dateutil.mjs'; + +class MK20DealPipelines extends LitElement { + static properties = { + deals: { type: Array }, + limit: { type: Number }, + offset: { type: Number }, + totalCount: { type: Number }, + failedTasks: { type: Object }, + restartingTaskType: { type: String }, + removingTaskType: { type: String } + }; + + constructor() { + super(); + this.deals = []; + this.limit = 25; + this.offset = 0; + this.totalCount = 0; + this.failedTasks = {}; + this.restartingTaskType = ''; + this.removingTaskType = ''; + this.loadData(); + } + + connectedCallback() { + super.connectedCallback(); + // Set up an interval to update data every 5 seconds + this.intervalId = setInterval(() => this.loadData(), 5000); + } + + disconnectedCallback() { + super.disconnectedCallback(); + // Clear the interval when the element is disconnected + clearInterval(this.intervalId); + } + + async loadData() { + try { + const params = [this.limit, this.offset]; + const deals = await RPCCall('MK20DDOPipelines', params); + this.deals = deals || []; + + // Load failed tasks data + const failed = await RPCCall('MK20PipelineFailedTasks', []); + this.failedTasks = failed || {}; + + this.requestUpdate(); + } catch (error) { + console.error('Failed to load deal pipelines or failed tasks:', error); + } + } + + nextPage() { + this.offset += this.limit; + this.loadData(); + } + + prevPage() { + if (this.offset >= this.limit) { + this.offset -= this.limit; + } else { + this.offset = 0; + } + this.loadData(); + } + + renderFailedTasks() { + const { DownloadingFailed, CommPFailed, AggFailed, IndexFailed } = this.failedTasks; + const entries = []; + + const renderLine = (label, count, type) => { + const isRestarting = this.restartingTaskType === type; + const isRemoving = this.removingTaskType === type; + const isWorking = isRestarting || isRemoving; + return html` +
+ ${label} Task: ${count} +
+ + ${isWorking ? 'Working...' : 'Actions'} + + + +
+
+ `; + }; + + if (DownloadingFailed > 0) { + entries.push(renderLine('Downloading', DownloadingFailed, 'downloading')); + } + if (CommPFailed > 0) { + entries.push(renderLine('CommP', CommPFailed, 'commp')); + } + if (AggFailed > 0) { + entries.push(renderLine('Aggregate', AggFailed, 'aggregate')); + } + if (IndexFailed > 0) { + entries.push(renderLine('Index', IndexFailed, 'index')); + } + + if (entries.length === 0) { + return null; + } + + return html` +
+

Failed Tasks

+ ${entries} +
+ `; + } + + async restartFailedTasks(type) { + this.restartingTaskType = type; + this.removingTaskType = ''; + this.requestUpdate(); + + try { + await RPCCall('MK20BulkRestartFailedMarketTasks', [type]); + await this.loadData(); + } catch (err) { + console.error('Failed to restart tasks:', err); + alert(`Failed to restart ${type} tasks: ${err.message || err}`); + } finally { + this.restartingTaskType = ''; + this.requestUpdate(); + } + } + + async removeFailedPipelines(type) { + this.removingTaskType = type; + this.restartingTaskType = ''; + this.requestUpdate(); + + try { + await RPCCall('MK20BulkRemoveFailedMarketPipelines', [type]); + await this.loadData(); + } catch (err) { + console.error('Failed to remove pipelines:', err); + alert(`Failed to remove ${type} pipelines: ${err.message || err}`); + } finally { + this.removingTaskType = ''; + this.requestUpdate(); + } + } + + render() { + return html` + + + +
+ ${this.renderFailedTasks()} +

+ Deal Pipelines + +

+ + + + + + + + + + + + ${this.deals.map( + (deal) => html` + + + + + + + + ` + )} + +
Created AtUUIDSP IDPiece CIDStatus
${formatDate(deal.created_at)} + ${deal.id} + ${deal.miner} + ${this.formatPieceCid(deal.piece_cid_v2)} + ${this.getDealStatus(deal)}
+
+ + Page ${(this.offset / this.limit) + 1} + +
+
+ `; + } + + formatPieceCid(pieceCid) { + if (!pieceCid) return ''; + if (pieceCid.length <= 24) { + return pieceCid; + } + const start = pieceCid.substring(0, 16); + const end = pieceCid.substring(pieceCid.length - 8); + return `${start}...${end}`; + } + + formatBytes(bytes) { + const units = ['Bytes', 'KiB', 'MiB', 'GiB', 'TiB']; + let i = 0; + let size = bytes; + while (size >= 1024 && i < units.length - 1) { + size /= 1024; + i++; + } + if (i === 0) { + return `${size} ${units[i]}`; + } else { + return `${size.toFixed(2)} ${units[i]}`; + } + } + + getDealStatus(deal) { + if (deal.complete) { + return '(#########) Complete'; + } else if (!deal.complete && deal.announce && deal.indexed) { + return '(########.) Announcing'; + } else if (deal.sealed && !deal.indexed) { + return '(#######..) Indexing'; + } else if (deal.sector?.Valid && !deal.sealed) { + return '(######...) Sealing'; + } else if (deal.aggregated && !deal.sector?.Valid) { + return '(#####....) Assigning Sector'; + } else if (deal.after_commp && !deal.aggregated) { + return '(####.....) Aggregating Deal'; + } else if (deal.downloaded && !deal.after_commp) { + return '(###......) CommP'; + } else if (deal.started && !deal.downloaded) { + return '(##.......) Downloading'; + } else { + return '(#........) Accepted'; + } + } + + static styles = css` + .pagination-controls { + display: flex; + justify-content: space-between; + align-items: center; + margin-top: 1rem; + } + + .info-btn { + position: relative; + border: none; + background: transparent; + cursor: pointer; + color: #17a2b8; + font-size: 1em; + margin-left: 8px; + } + + .tooltip-text { + display: none; + position: absolute; + top: 50%; + left: 120%; + transform: translateY(-50%); + min-width: 440px; + max-width: 600px; + background-color: #333; + color: #fff; + padding: 8px; + border-radius: 4px; + font-size: 0.8em; + text-align: left; + white-space: normal; + z-index: 10; + } + + .info-btn:hover .tooltip-text { + display: block; + } + + .copy-btn { + border: none; + background: transparent; + cursor: pointer; + color: #17a2b8; + padding: 0 0 0 5px; + } + + .copy-btn svg { + vertical-align: middle; + } + + .copy-btn:hover { + color: #0d6efd; + } + + .failed-tasks { + margin-bottom: 1rem; + } + .failed-tasks h2 { + margin: 0 0 0.5rem 0; + } + + details > summary { + display: inline-block; + cursor: pointer; + outline: none; + } + + .btn { + margin: 0 4px; + } + `; +} + +customElements.define('mk20-deal-pipelines', MK20DealPipelines); diff --git a/web/static/pages/mk20/ddo.mjs b/web/static/pages/mk20/ddo.mjs new file mode 100644 index 000000000..4dd275294 --- /dev/null +++ b/web/static/pages/mk20/ddo.mjs @@ -0,0 +1,191 @@ +import {css, html, LitElement} from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; +import RPCCall from '/lib/jsonrpc.mjs'; +import { formatDate } from '/lib/dateutil.mjs'; +import '/ux/yesno.mjs'; + +class MK20DDODealList extends LitElement { + static properties = { + deals: { type: Array }, + limit: { type: Number }, + offset: { type: Number }, + totalCount: { type: Number }, + }; + + constructor() { + super(); + this.deals = []; + this.limit = 25; + this.offset = 0; + this.totalCount = 0; + this.loadData(); + } + + async loadData() { + try { + const params = [this.limit, this.offset]; + this.deals = await RPCCall('MK20DDOStorageDeals', params); + this.requestUpdate(); + } catch (error) { + console.error('Failed to load ddo deals:', error); + } + } + + nextPage() { + this.offset += this.limit; + this.loadData(); + } + + prevPage() { + if (this.offset >= this.limit) { + this.offset -= this.limit; + } else { + this.offset = 0; + } + this.loadData(); + } + + render() { + // Check if there's an error or if the deals array is empty + if (!this.deals || this.deals.length === 0) { + return html``; // Return an empty template if there's no data to render + } + + return html` + + + +
+

+

DDO Deal List + +

+ + + + + + + + + + + + + + + ${this.deals.map( + (deal) => html` + + + + + + + + + ` + )} + +
Created AtIDProviderPiece CIDPiece SizeProcessedError
${formatDate(deal.created_at)}${deal.id}${deal.miner.Valid ? deal.miner.String : '-'} + ${deal.piece_cid_v2 + ? html`${this.formatPieceCid(deal.piece_cid_v2.String)}` + : 'Not Available'} +
+
+ + Page ${(this.offset / this.limit) + 1} + +
+
+ `; + } + + formatBytes(bytes) { + const units = ['Bytes', 'KiB', 'MiB', 'GiB', 'TiB']; + let i = 0; + let size = bytes; + while (size >= 1024 && i < units.length - 1) { + size /= 1024; + i++; + } + if (i === 0) { + return `${size} ${units[i]}`; + } else { + return `${size.toFixed(2)} ${units[i]}`; + } + } + + formatPieceCid(pieceCid) { + if (!pieceCid) return ''; + if (pieceCid.length <= 24) { + return pieceCid; + } + const start = pieceCid.substring(0, 16); + const end = pieceCid.substring(pieceCid.length - 8); + return `${start}...${end}`; + } + + static styles = css` + .pagination-controls { + display: flex; + justify-content: space-between; + align-items: center; + margin-top: 1rem; + } + + .info-btn { + position: relative; + border: none; + background: transparent; + cursor: pointer; + color: #17a2b8; + font-size: 1em; + margin-left: 8px; + } + + .tooltip-text { + display: none; + position: absolute; + top: 50%; + left: 120%; /* Position the tooltip to the right of the button */ + transform: translateY(-50%); /* Center the tooltip vertically */ + min-width: 440px; + max-width: 600px; + background-color: #333; + color: #fff; + padding: 8px; + border-radius: 4px; + font-size: 0.8em; + text-align: left; + white-space: normal; + z-index: 10; + } + + .info-btn:hover .tooltip-text { + display: block; + } + `; +} + +customElements.define('mk20-ddo-deal-list', MK20DDODealList); \ No newline at end of file diff --git a/web/static/pages/mk20/deal-search.mjs b/web/static/pages/mk20/deal-search.mjs new file mode 100644 index 000000000..42f6430e0 --- /dev/null +++ b/web/static/pages/mk20/deal-search.mjs @@ -0,0 +1,68 @@ +import { html, css, LitElement } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; + +class DealSearch extends LitElement { + static properties = { + searchTerm: { type: String }, + }; + + constructor() { + super(); + this.searchTerm = ''; + } + + handleInput(event) { + this.searchTerm = event.target.value; + } + + handleSearch() { + if (this.searchTerm.trim() !== '') { + window.location.href = `/pages/mk20-deal/?id=${encodeURIComponent(this.searchTerm.trim())}`; + } + // If searchTerm is empty, do nothing + } + + render() { + return html` + +
+ + +
+ `; + } + + static styles = css` + .search-container { + display: grid; + grid-template-columns: 1fr max-content; + grid-column-gap: 0.75rem; + margin-bottom: 1rem; + } + + .btn { + padding: 0.4rem 1rem; + border: none; + border-radius: 0; + background-color: var(--color-form-default); + color: var(--color-text-primary); + + &:hover, &:focus, &:focus-visible { + background-color: var(--color-form-default-pressed); + color: var(--color-text-secondary); + } + } + `; +} + +customElements.define('deal-search', DealSearch); diff --git a/web/static/pages/mk20/index.html b/web/static/pages/mk20/index.html new file mode 100644 index 000000000..cbc497209 --- /dev/null +++ b/web/static/pages/mk20/index.html @@ -0,0 +1,52 @@ + + + + MK20 Storage Deals + + + + + + + + + +
+
+
+

Storage Deals

+
+
+
+ +
+
+
+
+
+

Settings

+
+
+
+ +
+
+
+
+
+
+ +
+
+
+
+
+
+ +
+
+
+
+
+ + diff --git a/web/static/pages/mk20/settings.mjs b/web/static/pages/mk20/settings.mjs new file mode 100644 index 000000000..01a3bf9e4 --- /dev/null +++ b/web/static/pages/mk20/settings.mjs @@ -0,0 +1,300 @@ +import { html, css, LitElement } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; +import RPCCall from '/lib/jsonrpc.mjs'; + +/** + * A custom Web Component for managing products, data sources, and market contracts. + * Extends the LitElement class to leverage the Lit library for efficient rendering. + */ +class MarketManager extends LitElement { + static properties = { + products: { type: Array }, + dataSources: { type: Array }, + contracts: { type: Array }, + selectedContract: { type: Object }, + }; + + constructor() { + super(); + this.products = []; + this.dataSources = []; + this.contracts = []; + this.selectedContract = null; // For modal + this.loadAllData(); + } + + async loadAllData() { + try { + const productsResult = await RPCCall('ListProducts', []); + this.products = Array.isArray(productsResult) + ? productsResult + : Object.entries(productsResult).map(([name, enabled]) => ({ name, enabled })); + + const dataSourcesResult = await RPCCall('ListDataSources', []); + this.dataSources = Array.isArray(dataSourcesResult) + ? dataSourcesResult + : Object.entries(dataSourcesResult).map(([name, enabled]) => ({ name, enabled })); + + // const contractsResult = await RPCCall('ListMarketContracts', []); + // this.contracts = Array.isArray(contractsResult) + // ? contractsResult + // : Object.entries(contractsResult).map(([address, abi]) => ({ address, abi })); + // + // this.requestUpdate(); + } catch (err) { + console.error('Failed to load data:', err); + this.products = []; + this.dataSources = []; + this.contracts = []; + } + } + + async toggleProductState(product) { + const confirmation = confirm( + `Are you sure you want to ${product.enabled ? 'disable' : 'enable'} the product "${product.name}"?` + ); + + if (!confirmation) return; + + try { + if (product.enabled) { + await RPCCall('DisableProduct', [product.name]); + } else { + await RPCCall('EnableProduct', [product.name]); + } + this.loadAllData(); // Refresh after toggling + } catch (err) { + console.error('Failed to toggle product state:', err); + } + } + + async toggleDataSourceState(dataSource) { + const confirmation = confirm( + `Are you sure you want to ${dataSource.enabled ? 'disable' : 'enable'} the data source "${dataSource.name}"?` + ); + + if (!confirmation) return; + + try { + if (dataSource.enabled) { + await RPCCall('DisableDataSource', [dataSource.name]); + } else { + await RPCCall('EnableDataSource', [dataSource.name]); + } + this.loadAllData(); // Refresh after toggling + } catch (err) { + console.error('Failed to toggle data source state:', err); + } + } + + openContractModal(contract) { + this.selectedContract = { ...contract }; + this.updateComplete.then(() => { + const modal = this.shadowRoot.querySelector('#contract-modal'); + if (modal && typeof modal.showModal === 'function') { + modal.showModal(); + } + }); + } + + async removeContract(contract) { + if (!confirm(`Are you sure you want to remove contract ${contract.address}?`)) return; + + try { + await RPCCall('RemoveMarketContract', [contract.address]); + await this.loadAllData(); + } catch (err) { + console.error('Failed to remove contract:', err); + alert(`Failed to remove contract: ${err.message}`); + } + } + + + async saveContractChanges() { + try { + const { address, abi } = this.selectedContract; + + if (!address || !abi) { + alert("Contract address and ABI are required."); + return; + } + + const method = this.contracts.find(c => c.address === address) + ? 'UpdateMarketContract' + : 'AddMarketContract'; + + await RPCCall(method, [address, abi]); + + this.loadAllData(); + this.closeModal(); + } catch (err) { + console.error('Failed to save contract changes:', err); + alert(`Failed to save contract: ${err.message}`); + } + } + + + closeModal() { + this.selectedContract = null; + const modal = this.shadowRoot.querySelector('#contract-modal'); + if (modal) modal.close(); + } + + render() { + return html` + + + +
+ + + + + + ` + )} + +
+

Products

+ + + + + + + + + + ${this.products?.map( + (product) => html` + + + + + + ` + )} + +
NameEnabledAction
${product.name}${product.enabled ? 'Yes' : 'No'} + +
+
+

Data Sources

+ + + + + + + + + + ${this.dataSources?.map( + (source) => html` + + + + + + ` + )} + +
NameEnabledAction
${source.name}${source.enabled ? 'Yes' : 'No'} + +
+
+ + ---> + + + ${this.renderContractModal()} +
+ `; + } + + renderContractModal() { + if (!this.selectedContract) return null; + + return html` + +

+ ${this.contracts.some(c => c.address === this.selectedContract.address) + ? html`Edit Contract: ${this.selectedContract.address}` + : html`Add New Contract`} +

+ +
+ + this.selectedContract.address = e.target.value} + placeholder="0x..." + /> +
+ +
+ + +
+ +
+ + +
+
+ `; + } +} + +customElements.define('market-manager', MarketManager); \ No newline at end of file diff --git a/web/static/pages/pdp/index.html b/web/static/pages/pdp/index.html index 049237a94..bcbd23694 100644 --- a/web/static/pages/pdp/index.html +++ b/web/static/pages/pdp/index.html @@ -1,24 +1,41 @@ - - Node Info - - - - - -
-
-
-

Proof of Data Possession

-
-
-
-
- -
-
-
-
- + + PDP Overview + + + + + + + + +
+
+
+

Proof of Data Possession

+
+
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ +
+
+
+
+
+ diff --git a/web/static/pages/pdp/pdp.mjs b/web/static/pages/pdp/pdp.mjs index 74a05e853..107a81729 100644 --- a/web/static/pages/pdp/pdp.mjs +++ b/web/static/pages/pdp/pdp.mjs @@ -35,65 +35,10 @@ customElements.define('pdp-info', class PDPElement extends LitElement { } } - toggleAddServiceForm() { - this.showAddServiceForm = !this.showAddServiceForm; - } - toggleAddKeyForm() { this.showAddKeyForm = !this.showAddKeyForm; } - async addService(event) { - event.preventDefault(); - - const nameInput = this.shadowRoot.getElementById('service-name'); - const pubKeyInput = this.shadowRoot.getElementById('service-pubkey'); - - const name = nameInput.value.trim(); - const pubKey = pubKeyInput.value.trim(); - - if (!name || !pubKey) { - alert('Please provide both a name and a public key.'); - return; - } - - try { - // Call the RPC method to add the new PDP service - await RPCCall('AddPDPService', [name, pubKey]); - - // Reset the form - nameInput.value = ''; - pubKeyInput.value = ''; - - // Reload the services - await this.loadServices(); - - // Hide the form - this.showAddServiceForm = false; - } catch (error) { - console.error('Failed to add PDP service:', error); - alert('Failed to add PDP service: ' + (error.message || error)); - } - } - - async removeService(serviceId, serviceName) { - const confirmed = confirm(`Are you sure you want to remove the service "${serviceName}"?`); - if (!confirmed) { - return; - } - - try { - // Call the RPC method to remove the PDP service - await RPCCall('RemovePDPService', [serviceId]); - - // Reload the services - await this.loadServices(); - } catch (error) { - console.error('Failed to remove PDP service:', error); - alert('Failed to remove PDP service: ' + (error.message || error)); - } - } - async addKey(event) { event.preventDefault(); @@ -153,58 +98,6 @@ customElements.define('pdp-info', class PDPElement extends LitElement {
-

Services

- ${this.services.length > 0 ? html` - - - - - - - - - - - ${this.services.map(service => html` - - - - - - - `)} - -
IDNamePublic KeyAction
${service.id}${service.name} - - - -
- ` : html` -

No PDP services available.

- `} - - - - ${this.showAddServiceForm ? html` -
-
- - -
-
- - -
- -
- ` : ''} - -
-

Owner Addresses

${this.keys.length > 0 ? html` diff --git a/web/static/pages/pdp/pdp_deals.mjs b/web/static/pages/pdp/pdp_deals.mjs new file mode 100644 index 000000000..e9971f10c --- /dev/null +++ b/web/static/pages/pdp/pdp_deals.mjs @@ -0,0 +1,188 @@ +import {css, html, LitElement} from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; +import RPCCall from '/lib/jsonrpc.mjs'; +import { formatDate } from '/lib/dateutil.mjs'; +import '/ux/yesno.mjs'; + +class MK20PDPDealList extends LitElement { + static properties = { + deals: { type: Array }, + limit: { type: Number }, + offset: { type: Number }, + totalCount: { type: Number }, + }; + + constructor() { + super(); + this.deals = []; + this.limit = 25; + this.offset = 0; + this.totalCount = 0; + this.loadData(); + } + + async loadData() { + try { + const params = [this.limit, this.offset]; + this.deals = await RPCCall('MK20PDPStorageDeals', params); + this.requestUpdate(); + } catch (error) { + console.error('Failed to load pdp deals:', error); + } + } + + nextPage() { + this.offset += this.limit; + this.loadData(); + } + + prevPage() { + if (this.offset >= this.limit) { + this.offset -= this.limit; + } else { + this.offset = 0; + } + this.loadData(); + } + + render() { + // Check if there's an error or if the deals array is empty + if (!this.deals || this.deals.length === 0) { + return html``; // Return an empty template if there's no data to render + } + + return html` + + + +
+

+

PDP Deal List + +

+ +
+ + + + + + + + + + + ${this.deals.map( + (deal) => html` + + + + + + + + ` + )} + +
Created AtIDPiece CIDProcessedError
${formatDate(deal.created_at)}${deal.id} + ${deal.piece_cid_v2 + ? html`${this.formatPieceCid(deal.piece_cid_v2.String)}` + : 'Not Available'} +
+
+ + Page ${(this.offset / this.limit) + 1} + +
+
+ `; + } + + formatBytes(bytes) { + const units = ['Bytes', 'KiB', 'MiB', 'GiB', 'TiB']; + let i = 0; + let size = bytes; + while (size >= 1024 && i < units.length - 1) { + size /= 1024; + i++; + } + if (i === 0) { + return `${size} ${units[i]}`; + } else { + return `${size.toFixed(2)} ${units[i]}`; + } + } + + formatPieceCid(pieceCid) { + if (!pieceCid) return ''; + if (pieceCid.length <= 24) { + return pieceCid; + } + const start = pieceCid.substring(0, 16); + const end = pieceCid.substring(pieceCid.length - 8); + return `${start}...${end}`; + } + + static styles = css` + .pagination-controls { + display: flex; + justify-content: space-between; + align-items: center; + margin-top: 1rem; + } + + .info-btn { + position: relative; + border: none; + background: transparent; + cursor: pointer; + color: #17a2b8; + font-size: 1em; + margin-left: 8px; + } + + .tooltip-text { + display: none; + position: absolute; + top: 50%; + left: 120%; /* Position the tooltip to the right of the button */ + transform: translateY(-50%); /* Center the tooltip vertically */ + min-width: 440px; + max-width: 600px; + background-color: #333; + color: #fff; + padding: 8px; + border-radius: 4px; + font-size: 0.8em; + text-align: left; + white-space: normal; + z-index: 10; + } + + .info-btn:hover .tooltip-text { + display: block; + } + `; +} + +customElements.define('mk20-pdp-deal-list', MK20PDPDealList); \ No newline at end of file diff --git a/web/static/pages/pdp/pipeline.mjs b/web/static/pages/pdp/pipeline.mjs new file mode 100644 index 000000000..764519a8c --- /dev/null +++ b/web/static/pages/pdp/pipeline.mjs @@ -0,0 +1,381 @@ +import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; +import RPCCall from '/lib/jsonrpc.mjs'; +import { formatDate } from '/lib/dateutil.mjs'; + +class MK20PDPPipelines extends LitElement { + static properties = { + deals: { type: Array }, + limit: { type: Number }, + offset: { type: Number }, + totalCount: { type: Number }, + failedTasks: { type: Object }, + restartingTaskType: { type: String }, + removingTaskType: { type: String } + }; + + constructor() { + super(); + this.deals = []; + this.limit = 25; + this.offset = 0; + this.totalCount = 0; + this.failedTasks = {}; + this.restartingTaskType = ''; + this.removingTaskType = ''; + this.loadData(); + } + + connectedCallback() { + super.connectedCallback(); + // Set up an interval to update data every 5 seconds + this.intervalId = setInterval(() => this.loadData(), 5000); + } + + disconnectedCallback() { + super.disconnectedCallback(); + // Clear the interval when the element is disconnected + clearInterval(this.intervalId); + } + + async loadData() { + try { + const params = [this.limit, this.offset]; + const deals = await RPCCall('MK20PDPPipelines', params); + this.deals = deals || []; + + // Load failed tasks data + const failed = await RPCCall('MK20PDPPipelineFailedTasks', []); + this.failedTasks = failed || {}; + + this.requestUpdate(); + } catch (error) { + console.error('Failed to load deal pipelines or failed tasks:', error); + } + } + + nextPage() { + this.offset += this.limit; + this.loadData(); + } + + prevPage() { + if (this.offset >= this.limit) { + this.offset -= this.limit; + } else { + this.offset = 0; + } + this.loadData(); + } + + renderFailedTasks() { + const { DownloadingFailed, CommPFailed, AggFailed, AddPieceFailed, SaveCacheFailed, IndexFailed } = this.failedTasks; + const entries = []; + + const renderLine = (label, count, type) => { + const isRestarting = this.restartingTaskType === type; + const isRemoving = this.removingTaskType === type; + const isWorking = isRestarting || isRemoving; + return html` +
+ ${label} Task: ${count} +
+ + ${isWorking ? 'Working...' : 'Actions'} + + + +
+
+ `; + }; + + if (DownloadingFailed > 0) { + entries.push(renderLine('Downloading', DownloadingFailed, 'downloading')); + } + if (CommPFailed > 0) { + entries.push(renderLine('CommP', CommPFailed, 'commp')); + } + if (AggFailed > 0) { + entries.push(renderLine('Aggregate', AggFailed, 'aggregate')); + } + if (AddPieceFailed > 0) { + entries.push(renderLine('AddPiece', AggFailed, 'add_piece')); + } + if (SaveCacheFailed > 0) { + entries.push(renderLine('SaveCache', AggFailed, 'save_cache')); + } + if (IndexFailed > 0) { + entries.push(renderLine('Index', IndexFailed, 'index')); + } + + if (entries.length === 0) { + return null; + } + + return html` +
+

Failed Tasks

+ ${entries} +
+ `; + } + + async restartFailedTasks(type) { + this.restartingTaskType = type; + this.removingTaskType = ''; + this.requestUpdate(); + + try { + await RPCCall('MK20BulkRestartFailedPDPTasks', [type]); + await this.loadData(); + } catch (err) { + console.error('Failed to restart tasks:', err); + alert(`Failed to restart ${type} tasks: ${err.message || err}`); + } finally { + this.restartingTaskType = ''; + this.requestUpdate(); + } + } + + async removeFailedPipelines(type) { + this.removingTaskType = type; + this.restartingTaskType = ''; + this.requestUpdate(); + + try { + await RPCCall('MK20BulkRemoveFailedPDPPipelines', [type]); + await this.loadData(); + } catch (err) { + console.error('Failed to remove pipelines:', err); + alert(`Failed to remove ${type} pipelines: ${err.message || err}`); + } finally { + this.removingTaskType = ''; + this.requestUpdate(); + } + } + + render() { + return html` + + + +
+

+ ${this.renderFailedTasks()} +

+ PDP Pipelines + +

+ + + + + + + + + + + + ${this.deals.map( + (deal) => html` + + + + + + + + ` + )} + +
Created AtUUIDSP IDPiece CIDStatus
${formatDate(deal.created_at)} + ${deal.id} + ${deal.miner} + ${this.formatPieceCid(deal.piece_cid_v2)} + ${this.getDealStatus(deal)}
+
+ + Page ${(this.offset / this.limit) + 1} + +
+
+ `; + } + + formatPieceCid(pieceCid) { + if (!pieceCid) return ''; + if (pieceCid.length <= 24) { + return pieceCid; + } + const start = pieceCid.substring(0, 16); + const end = pieceCid.substring(pieceCid.length - 8); + return `${start}...${end}`; + } + + formatBytes(bytes) { + const units = ['Bytes', 'KiB', 'MiB', 'GiB', 'TiB']; + let i = 0; + let size = bytes; + while (size >= 1024 && i < units.length - 1) { + size /= 1024; + i++; + } + if (i === 0) { + return `${size} ${units[i]}`; + } else { + return `${size.toFixed(2)} ${units[i]}`; + } + } + + getDealStatus(deal) { + if (deal.complete) { + return '(#########) Complete'; + } else if (!deal.complete && deal.announce && deal.indexed) { + return '(########.) Announcing'; + } else if (deal.after_save_cache && !deal.indexed) { + return '(#######..) Indexing'; + } else if (deal.after_add_piece && !deal.after_save_cache) { + return '(######...) Saving Proving Cache'; + } else if (deal.aggregated && !deal.after_add_piece) { + return '(#####....) Adding Piece'; + } else if (deal.after_commp && !deal.aggregated) { + return '(####.....) Aggregating Deal'; + } else if (deal.downloaded && !deal.after_commp) { + return '(###......) CommP'; + } else if (deal.started && !deal.downloaded) { + return '(##.......) Downloading'; + } else { + return '(#........) Accepted'; + } + } + + static styles = css` + .pagination-controls { + display: flex; + justify-content: space-between; + align-items: center; + margin-top: 1rem; + } + + .info-btn { + position: relative; + border: none; + background: transparent; + cursor: pointer; + color: #17a2b8; + font-size: 1em; + margin-left: 8px; + } + + .tooltip-text { + display: none; + position: absolute; + top: 50%; + left: 120%; + transform: translateY(-50%); + min-width: 440px; + max-width: 600px; + background-color: #333; + color: #fff; + padding: 8px; + border-radius: 4px; + font-size: 0.8em; + text-align: left; + white-space: normal; + z-index: 10; + } + + .info-btn:hover .tooltip-text { + display: block; + } + + .copy-btn { + border: none; + background: transparent; + cursor: pointer; + color: #17a2b8; + padding: 0 0 0 5px; + } + + .copy-btn svg { + vertical-align: middle; + } + + .copy-btn:hover { + color: #0d6efd; + } + + .failed-tasks { + margin-bottom: 1rem; + } + .failed-tasks h2 { + margin: 0 0 0.5rem 0; + } + + details > summary { + display: inline-block; + cursor: pointer; + outline: none; + } + + .btn { + margin: 0 4px; + } + `; +} + +customElements.define('mk20-pdp-pipelines', MK20PDPPipelines); diff --git a/web/static/pages/piece/piece-info.mjs b/web/static/pages/piece/piece-info.mjs index 27489d6ca..104335f6e 100644 --- a/web/static/pages/piece/piece-info.mjs +++ b/web/static/pages/piece/piece-info.mjs @@ -30,7 +30,7 @@ customElements.define('piece-info', class PieceInfoElement extends LitElement { // Fetch piece info this.data = await RPCCall('PieceInfo', [pieceCid]); - this.mk12DealData = await RPCCall('MK12DealDetail', [pieceCid]); + this.DealData = await RPCCall('PieceDealDetail', [pieceCid]); this.pieceParkStates = await RPCCall('PieceParkStates', [pieceCid]); // TODO SNAP/POREP pipelines @@ -44,7 +44,7 @@ customElements.define('piece-info', class PieceInfoElement extends LitElement { handleRemove(uuid) { if (confirm('Are you sure you want to remove the deal pipeline?')) { - RPCCall('MK12DealPipelineRemove', [uuid]) + RPCCall('DealPipelineRemove', [uuid]) .then(() => { alert('Deal pipeline removed successfully.'); this.loadData(); // Refresh data @@ -79,6 +79,10 @@ customElements.define('piece-info', class PieceInfoElement extends LitElement { + + + + @@ -100,7 +104,9 @@ customElements.define('piece-info', class PieceInfoElement extends LitElement {
Piece CID${this.data.piece_cid_v2}
Piece CID V1 ${this.data.piece_cid}
IPNI AD - ${this.data.ipni_ad ? html`${this.data.ipni_ad}` : 'No Ad Found'} + ${this.data.ipni_ads && this.data.ipni_ads.length > 0 + ? this.data.ipni_ads.map(ad => html`${ad} `) + : 'No Ad Found'}
@@ -122,12 +128,19 @@ customElements.define('piece-info', class PieceInfoElement extends LitElement { ${this.data.deals.map((item) => html` - ${item.id} + + ${item.mk20 + ? html`${item.id}` + : html`${item.id}`} + ${item.boost_deal ? 'Boost' : (item.legacy_deal ? 'Legacy' : 'DDO')} ${item.miner} ${item.chain_deal_id} - ${item.sector} - ${item.offset} + + ${item.sector > 0 ? html`${item.sector}`: "NA" } + + ${item.offset.Valid ? item.offset.int64 : html`NA`} + ${this.toHumanBytes(item.length)} ${this.toHumanBytes(item.raw_size)} @@ -137,9 +150,9 @@ customElements.define('piece-info', class PieceInfoElement extends LitElement { ${this.pieceParkStates ? this.renderPieceParkStates() : ''} - ${this.mk12DealData && this.mk12DealData.length > 0 ? html` -

Related Deals

- ${this.mk12DealData.map((entry) => html` + ${this.DealData?.mk12?.length > 0 ? html` +

Related MK12 Deals

+ ${this.DealData.mk12.map((entry) => html`

Deal ${entry.deal.uuid}

@@ -208,7 +221,9 @@ customElements.define('piece-info', class PieceInfoElement extends LitElement { - + @@ -219,7 +234,7 @@ customElements.define('piece-info', class PieceInfoElement extends LitElement { `; } })()} - ${entry.pipeline ? html` + ${entry.mk12_pipeline ? html` @@ -227,88 +242,88 @@ customElements.define('piece-info', class PieceInfoElement extends LitElement { - - - - - - - - + + + + + + + + - + - + - + - + - + - + - + - + - + - - - + + + - + @@ -319,6 +334,222 @@ customElements.define('piece-info', class PieceInfoElement extends LitElement {
Top Level Info 📋
${item.miner} ${item.chain_deal_id} ${item.sector}${item.offset} + ${item.offset ? item.offset : html``} + ${this.toHumanBytes(item.length)} ${this.toHumanBytes(item.raw_size)}
PIPELINE ACTIVE
Controls
Created At${formatDate(entry.pipeline.created_at)}
Piece CID${entry.pipeline.piece_cid}
Piece Size${this.toHumanBytes(entry.pipeline.piece_size)}
Raw Size${entry.pipeline.raw_size.Valid ? this.toHumanBytes(entry.pipeline.raw_size.Int64) : 'N/A'}
Offline
URL${entry.pipeline.url.Valid ? entry.pipeline.url.String : 'N/A'}
Headers
${JSON.stringify(entry.pipeline.headers, null, 2)}
Should Index${this.renderNullableYesNo(entry.pipeline.should_index.Bool)}
Created At${formatDate(entry.mk12_pipeline.created_at)}
Piece CID${entry.mk12_pipeline.piece_cid}
Piece Size${this.toHumanBytes(entry.mk12_pipeline.piece_size)}
Raw Size${entry.mk12_pipeline.raw_size.Valid ? this.toHumanBytes(entry.mk12_pipeline.raw_size.Int64) : 'N/A'}
Offline
URL${entry.mk12_pipeline.url.Valid ? entry.mk12_pipeline.url.String : 'N/A'}
Headers
${JSON.stringify(entry.mk12_pipeline.headers, null, 2)}
Should Index${this.renderNullableYesNo(entry.mk12_pipeline.should_index.Bool)}
Announce${this.renderNullableYesNo(entry.pipeline.announce.Bool)}${this.renderNullableYesNo(entry.mk12_pipeline.announce.Bool)}
Progress 🛠️
Data Fetched${this.renderNullableDoneNotDone(entry.pipeline.started.Bool)}${this.renderNullableDoneNotDone(entry.mk12_pipeline.started.Bool)}
After Commp${this.renderNullableDoneNotDone(entry.pipeline.after_commp.Bool)}${this.renderNullableDoneNotDone(entry.mk12_pipeline.after_commp.Bool)}
After PSD${this.renderNullableDoneNotDone(entry.pipeline.after_psd.Bool)}${this.renderNullableDoneNotDone(entry.mk12_pipeline.after_psd.Bool)}
After Find Deal${this.renderNullableDoneNotDone(entry.pipeline.after_find_deal.Bool)}${this.renderNullableDoneNotDone(entry.mk12_pipeline.after_find_deal.Bool)}
Sealed${this.renderNullableDoneNotDone(entry.pipeline.sealed.Bool)}${this.renderNullableDoneNotDone(entry.mk12_pipeline.sealed.Bool)}
Indexed${this.renderNullableDoneNotDone(entry.pipeline.indexed.Bool)}${this.renderNullableDoneNotDone(entry.mk12_pipeline.indexed.Bool)}
Announced
Early States 🌿
Commp Task ID - ${entry.pipeline.commp_task_id.Valid - ? html`` + ${entry.mk12_pipeline.commp_task_id.Valid + ? html`` : 'N/A'}
PSD Task ID - ${entry.pipeline.psd_task_id.Valid - ? html`` + ${entry.mk12_pipeline.psd_task_id.Valid + ? html`` : 'N/A'}
PSD Wait Time${entry.pipeline.psd_wait_time.Valid ? formatDate(entry.pipeline.psd_wait_time.Time) : 'N/A'}
PSD Wait Time${entry.mk12_pipeline.psd_wait_time.Valid ? formatDate(entry.mk12_pipeline.psd_wait_time.Time) : 'N/A'}
Find Deal Task ID - ${entry.pipeline.find_deal_task_id.Valid - ? html`` + ${entry.mk12_pipeline.find_deal_task_id.Valid + ? html`` : 'N/A'}
Sealing 📦
Sector${entry.pipeline.sector.Valid ? html`${entry.pipeline.sector.Int64}` : 'N/A'}
Reg Seal Proof${entry.pipeline.reg_seal_proof.Valid ? entry.pipeline.reg_seal_proof.Int64 : 'N/A'}
Sector Offset${entry.pipeline.sector_offset.Valid ? entry.pipeline.sector_offset.Int64 : 'N/A'}
Sector${entry.mk12_pipeline.sector.Valid ? html`${entry.mk12_pipeline.sector.Int64}` : 'N/A'}
Reg Seal Proof${entry.mk12_pipeline.reg_seal_proof.Valid ? entry.mk12_pipeline.reg_seal_proof.Int64 : 'N/A'}
Sector Offset${entry.mk12_pipeline.sector_offset.Valid ? entry.mk12_pipeline.sector_offset.Int64 : 'N/A'}
Indexing 🔍
Indexing Created At${entry.pipeline.indexing_created_at.Valid ? formatDate(entry.pipeline.indexing_created_at.Time) : 'N/A'}
Indexing Created At${entry.mk12_pipeline.indexing_created_at.Valid ? formatDate(entry.mk12_pipeline.indexing_created_at.Time) : 'N/A'}
Indexing Task ID - ${entry.pipeline.indexing_task_id.Valid - ? html`` + ${entry.mk12_pipeline.indexing_task_id.Valid + ? html`` : 'N/A'}
`)} ` : ''} + + ${this.DealData?.mk20?.length > 0 ? html` +

Related MK20 Deals

+ ${this.DealData.mk20.map((entry) => html` +

Deal ${entry.deal.deal.identifier}

+ + + + + + + + + + + + ${(() => { + const matchingPieceDeals = this.data.deals.filter(deal => deal.id === entry.deal.uuid); + if (matchingPieceDeals.length > 0) { + return html` + + + `; + } + })()} + ${entry.mk20_ddo_pipeline ? html` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ` : html``} + ${entry.mk20_pdp_pipeline ? html` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ` : html``} + +
Top Level Info 📋
ID${entry.deal.deal.identifier}
Deal Data ⚙️
Piece CID${entry.deal.deal.data.piece_cid['/']}
Piece Size${this.toHumanBytes(entry.deal.deal.data.piece_size)}
Status 🟢️🔴
DDO Error${entry.deal.ddoerr.Valid ? entry.deal.ddoerr.String : 'N/A'}
PDP Error${entry.deal.pdperr.Valid ? entry.deal.pdperr.String : 'N/A'}
Associated Piece Deals 🔗️
+ + + + + + + + + + + + + + + ${matchingPieceDeals.map((item) => html` + + + + + + + + + + + `)} + +
IDDeal TypeMinerChain Deal IDSectorOffsetLengthRaw Size
${item.id}${item.boost_deal ? 'Boost' : (item.legacy_deal ? 'Legacy' : 'DDO')}${item.miner}${item.chain_deal_id}${item.sector}${item.offset}${this.toHumanBytes(item.length)}${this.toHumanBytes(item.raw_size)}
+
DDO PIPELINE ACTIVE
Controls + +
Created At${formatDate(entry.mk20_ddo_pipeline.created_at)}
Piece CID${entry.mk20_ddo_pipeline.piece_cid_v2}
Should Index${this.renderNullableYesNo(entry.mk20_ddo_pipeline.indexing)}
Announce${this.renderNullableYesNo(entry.mk20_ddo_pipeline.announce)}
Progress 🛠️
Data Fetched${this.renderNullableDoneNotDone(entry.mk20_ddo_pipeline.downloaded)}
After Commp${this.renderNullableDoneNotDone(entry.mk20_ddo_pipeline.after_commp)}
Aggregated${this.renderNullableDoneNotDone(entry.mk20_ddo_pipeline.aggregated)}
Sealed${this.renderNullableDoneNotDone(entry.mk20_ddo_pipeline.sealed)}
Indexed${this.renderNullableDoneNotDone(entry.mk20_ddo_pipeline.indexed)}
Announced
Early States 🌿
Commp Task ID + ${entry.mk20_ddo_pipeline.commp_task_id.Valid ? html`` : 'N/A'} +
Aggregation Task ID + ${entry.mk20_ddo_pipeline.agg_task_id.Valid ? html`` : 'N/A'} +
Sealing 📦
Sector${entry.mk20_ddo_pipeline.sector.Valid ? html`${entry.mk20_ddo_pipeline.sector.Int64}` : 'N/A'}
Reg Seal Proof${entry.mk20_ddo_pipeline.reg_seal_proof.Valid ? entry.mk20_ddo_pipeline.reg_seal_proof.Int64 : 'N/A'}
Sector Offset${entry.mk20_ddo_pipeline.sector_offset.Valid ? entry.mk20_ddo_pipeline.sector_offset.Int64 : 'N/A'}
Indexing 🔍
Indexing Created At${entry.mk20_ddo_pipeline.indexing_created_at.Valid ? formatDate(entry.mk20_ddo_pipeline.indexing_created_at.Time) : 'N/A'}
Indexing Task ID + ${entry.mk20_ddo_pipeline.indexing_task_id.Valid ? html`` : 'N/A'} +
No DDO Pipeline Data
PDP PIPELINE ACTIVE
Controls + +
Created At${formatDate(entry.mk20_pdp_pipeline.created_at)}
Piece CID${entry.mk20_pdp_pipeline.piece_cid_v2}
Should Index${this.renderNullableYesNo(entry.mk20_pdp_pipeline.indexing)}
Announce Piece${this.renderNullableYesNo(entry.mk20_pdp_pipeline.announce)}
Announce Payload${this.renderNullableYesNo(entry.mk20_pdp_pipeline.announce_payload)}
Progress 🛠️
Data Fetched${this.renderNullableDoneNotDone(entry.mk20_pdp_pipeline.downloaded)}
After Commp${this.renderNullableDoneNotDone(entry.mk20_pdp_pipeline.after_commp)}
Aggregated${this.renderNullableDoneNotDone(entry.mk20_pdp_pipeline.aggregated)}
Add Piece${this.renderNullableDoneNotDone(entry.mk20_pdp_pipeline.after_add_piece)}
Add Piece Success${this.renderNullableDoneNotDone(entry.mk20_pdp_pipeline.after_add_piece_msg)}
Save Cache${this.renderNullableDoneNotDone(entry.mk20_pdp_pipeline.after_save_cache)}
Indexed${this.renderNullableDoneNotDone(entry.mk20_pdp_pipeline.indexed)}
Announced
Early States 🌿
Commp Task ID + ${entry.mk20_pdp_pipeline.commp_task_id.Valid ? html`` : 'N/A'} +
Aggregation Task ID + ${entry.mk20_pdp_pipeline.agg_task_id.Valid ? html`` : 'N/A'} +
Add Piece Task ID + ${entry.mk20_pdp_pipeline.add_piece_task_id.Valid ? html`` : 'N/A'} +
Save Cache Task ID + ${entry.mk20_pdp_pipeline.save_cache_task_id.Valid ? html`` : 'N/A'} +
Indexing 🔍
Indexing Created At${entry.mk20_pdp_pipeline.indexing_created_at.Valid ? formatDate(entry.mk20_pdp_pipeline.indexing_created_at.Time) : 'N/A'}
Indexing Task ID + ${entry.mk20_pdp_pipeline.indexing_task_id.Valid ? html`` : 'N/A'} +
No PDP Pipeline Data
+ `)} + ` : ''} `; } diff --git a/web/static/pages/sector/sector-info.mjs b/web/static/pages/sector/sector-info.mjs index 4d6efe072..023cb4911 100644 --- a/web/static/pages/sector/sector-info.mjs +++ b/web/static/pages/sector/sector-info.mjs @@ -111,6 +111,7 @@ customElements.define('sector-info',class SectorInfo extends LitElement { + @@ -130,7 +131,12 @@ customElements.define('sector-info',class SectorInfo extends LitElement { ${(this.data.Pieces||[]).map(piece => html` - + + diff --git a/web/static/pages/upload-status/index.html b/web/static/pages/upload-status/index.html new file mode 100644 index 000000000..c99099b27 --- /dev/null +++ b/web/static/pages/upload-status/index.html @@ -0,0 +1,31 @@ + + + + + Deals + + + + + + +
+
+
+
+

Upload Status

+
+
+
+
+
+
+ +
+
+
+
+
+ + + \ No newline at end of file diff --git a/web/static/pages/upload-status/status.mjs b/web/static/pages/upload-status/status.mjs new file mode 100644 index 000000000..b61c52fab --- /dev/null +++ b/web/static/pages/upload-status/status.mjs @@ -0,0 +1,94 @@ +import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; +import RPCCall from '/lib/jsonrpc.mjs'; + +class UploadStatus extends LitElement { + constructor() { + super(); + this.loaddata(); + } + + static styles = css` + .chunk-box { + display: grid; + grid-template-columns: repeat(16, auto); + grid-template-rows: repeat(3, auto); + grid-gap: 1px; + } + .chunk-entry { + width: 10px; + height: 10px; + background-color: grey; + margin: 1px; + } + .chunk-complete { + background-color: green; + } + .chunk-missing + background-color: red; + } + ` + + async loaddata() { + try { + const params = new URLSearchParams(window.location.search); + this.data = await RPCCall('ChunkUploadStatus', [params.get('id')]); + this.requestUpdate(); + } catch (error) { + console.error('Failed to load upload status:', error); + alert(`Failed to load upload status: ${error.message}`); + } + } + + render() { + if (!this.data) return html`

No data.

`; + + return html` + + + +
Piece IndexPiece CID V2 Piece CID Piece Size Deal ID
${piece.PieceIndex}${piece.PieceCid} + ${piece.PieceCidV2 && piece.PieceCidV2.trim() !== "" + ? html`${piece.PieceCidV2}` + : 'NA'} + ${piece.PieceCid} ${piece.PieceSize} ${piece.DealID} ${piece.DataUrl}
+ + + + + +
Identifier${this.data.id}
Total Chunks${this.data.status.total_chunks}
Uploaded${this.data.status.uploaded}
Missing${this.data.status.missing}
Status${this.renderChunks(this.data.status)}
+ `; + } + + renderChunks(status) { + const totalChunks = status.total_chunks; + const missingChunks = status.missing_chunks || []; + const uploadedChunksSet = new Set(); + + if (status.uploaded_chunks) { + status.uploaded_chunks.forEach(chunk => uploadedChunksSet.add(chunk)); + } + + // Create chunk entries + const chunkEntries = Array.from({ length: totalChunks }, (_, i) => { + const chunkIndex = i + 1; // Chunks start from 1 + const isMissing = missingChunks.includes(chunkIndex); + const isUploaded = uploadedChunksSet.has(chunkIndex); + + return html` +
+
+ `; + }); + + return html` +
+ ${chunkEntries} +
+ `; + } +} +customElements.define('upload-status', UploadStatus); + diff --git a/web/static/ux/curio-ux.mjs b/web/static/ux/curio-ux.mjs index 904744d49..e69c2913b 100644 --- a/web/static/ux/curio-ux.mjs +++ b/web/static/ux/curio-ux.mjs @@ -199,7 +199,17 @@ class CurioUX extends LitElement { - Storage Deals + MK12 + + +
  • + + + + + + + MK20